/*
 * Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "code/codeCache.hpp"
#include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shenandoah/preservedMarks.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahForwarding.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
#include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
#include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
#include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
#include "gc_implementation/shenandoah/shenandoahVMOperations.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/workgroup.hpp"

ShenandoahMarkCompact::ShenandoahMarkCompact() :
  _gc_timer(NULL),
  _preserved_marks(new PreservedMarksSet(true)) {}

void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
  _gc_timer = gc_timer;
}

void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
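    // Heap dump / class histogram before the collection, if requested on the command line.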
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // 0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->concurrent_mark()->cancel();
      heap->stop_concurrent_marking();
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // b1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // b2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // c. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // d. Abandon reference discovery and clear all discovered references.
    ReferenceProcessor* rp = heap->ref_processor();
    rp->disable_discovery();
    rp->abandon_partial_discovery();
    rp->verify_no_references_recorded();

    // e. Set the has-forwarded-objects bit back, in case some steps above dropped it.
    heap->set_has_forwarded_objects(has_forwarded_objects);

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());
  }

  heap->make_parsable(true);

  CodeCache::gc_prologue();

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop it.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents the read barrier from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Setup workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }

  {
    // The rest of code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    SharedRestorePreservedMarksTaskExecutor exec(heap->workers());
    _preserved_marks->restore(&exec);
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();

    JvmtiExport::gc_epilogue();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices, mtGC);

  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
    heap->post_full_gc_dump(_gc_timer);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
    heap->resize_all_tlabs();
  }
}
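
// Prepares a region for full-GC marking: captures the current top as TAMS and
// resets the region's live data, so that marking starts from a clean slate.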
class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
  }
};

void ShenandoahMarkCompact::phase1_mark_heap() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 1: Mark live objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  ShenandoahConcurrentMark* cm = heap->concurrent_mark();

  heap->set_process_references(heap->heuristics()->can_process_references());
  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/, true);
  rp->setup_policy(true); // forcefully purge all soft references
  rp->set_active_mt_degree(heap->workers()->active_workers());

  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->finish_mark_from_roots(/* full_gc = */ true);

  heap->mark_complete_marking_context();
}
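
// Computes the new ("compact") location for every live object in the from-regions of
// one worker slice. The target address is recorded as a forwarding pointer in the
// object's mark word; original marks are preserved where necessary so they can be
// restored after compaction.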
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  void finish_region() {
    assert(_to_region != NULL, "should not happen");
    _to_region->set_new_top(_compact_point);
  }

  bool is_compact_same_region() {
    return _from_region == _to_region;
  }

  int empty_regions_pos() {
    return _empty_regions_pos;
  }

  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");

    size_t obj_size = p->size();
    if (_compact_point + obj_size > _to_region->end()) {
      finish_region();

      // Object doesn't fit. Pick next empty region and start compacting there.
      ShenandoahHeapRegion* new_to_region;
      if (_empty_regions_pos < _empty_regions.length()) {
        new_to_region = _empty_regions.at(_empty_regions_pos);
        _empty_regions_pos++;
      } else {
        // Out of empty regions? Compact within the same region.
        new_to_region = _from_region;
      }

      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }

    // Object fits into current region, record new location:
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    shenandoah_assert_not_forwarded(NULL, p);
    _preserved_marks->push_if_necessary(p, p->mark());
    p->forward_to(oop(_compact_point));
    _compact_point += obj_size;
  }
};
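
// Parallel task driving the sliding compaction: each worker claims regions into its own
// slice and runs the closure above over them, reusing emptied regions in the slice as
// further compaction targets.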
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:
  PreservedMarksSet* const _preserved_marks;
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;
  ShenandoahRegionIterator _heap_regions;

  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) {
    ShenandoahHeapRegion* from_region = _heap_regions.next();

    // Look for next candidate for this slice:
    while (from_region != NULL) {
      // Empty region: get it into the slice to defragment the slice itself.
      // We could have skipped this without violating correctness, but we really
      // want to compact all live regions to the start of the heap, which sometimes
      // means moving them into the fully empty regions.
      if (from_region->is_empty()) break;

      // Can move the region, and this is not a humongous region. Humongous
      // regions are not taken here, because their moves are handled separately.
      if (from_region->is_stw_move_allowed() && !from_region->is_humongous()) break;

      from_region = _heap_regions.next();
    }

    if (from_region != NULL) {
      assert(slice != NULL, "sanity");
      assert(!from_region->is_humongous(), "this path cannot handle humongous regions");
      assert(from_region->is_empty() || from_region->is_stw_move_allowed(), "only regions that can be moved in mark-compact");
      slice->add_region(from_region);
    }

    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(slice);

    // No work?
    if (from_region == NULL) {
      return;
    }

    // Sliding compaction. Walk all regions in the slice, and compact them.
    // Remember empty regions and reuse them as needed.
    ResourceMark rm;
    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);
    while (from_region != NULL) {
      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = next_from_region(slice);
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // humongous start there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.
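  //
  // Illustrative example: with 10 regions, a movable humongous object in regions 5..6
  // and empty regions 7..9, the window is [6; 10) by the time region 5 is visited (the
  // object's own continuation region counts as available). The object is forwarded to
  // regions 8..9, and to_end shrinks to 8.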

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->region_number();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->region_number()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark());
        old_obj->forward_to(oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->region_number();
    to_end = r->region_number();
  }
}
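
// Puts every region into a state that Full GC can slide data through: recycles trash,
// demotes collection-set regions back to regular, and commits empty uncommitted regions.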
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), err_msg("only committed regions in heap now, see region " SIZE_FORMAT, r->region_number()));

    // Record current region occupancy: this communicates empty regions are free
    // to the rest of Full GC code.
    r->set_new_top(r->top());
  }
};

class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahTrashImmediateGarbageClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      oop humongous_obj = oop(r->bottom());
      if (!_ctx->is_marked(humongous_obj)) {
        assert(!r->has_live(),
               err_msg("Region " SIZE_FORMAT " is not marked, should not have live", r->region_number()));
        _heap->trash_humongous_region_at(r);
      } else {
        assert(r->has_live(),
               err_msg("Region " SIZE_FORMAT " should have live", r->region_number()));
      }
    } else if (r->is_humongous_continuation()) {
      // If we hit continuation, the non-live humongous starts should have been trashed already
      assert(r->humongous_start_region()->has_live(),
             err_msg("Region " SIZE_FORMAT " should have live", r->region_number()));
    } else if (r->is_regular()) {
      if (!r->has_live()) {
        r->make_trash_immediate();
      }
    }
  }
};

void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 2: Compute new object addresses", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  // We are about to figure out which regions can be compacted; make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
    ShenandoahPrepareForCompactionTask prepare_task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&prepare_task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}
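
// Phase 3 closure: rewrites a reference to point at the forwardee recorded in the
// target object's mark word during phase 2. References to objects that did not move
// are left untouched.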
class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        oopDesc::encode_store_heap_oop(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    CLDToOopClosure adjust_cld_closure(&cl, true);
    MarkingCodeBlobClosure adjust_code_closure(&cl,
                                               CodeBlobToOopClosure::FixRelocations);

    _rp->process_all_roots(&cl, &cl,
                           &adjust_cld_closure,
                           &adjust_code_closure, NULL, worker_id);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahMarkCompact::phase3_update_references() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 3: Adjust pointers", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
    COMPILER2_PRESENT(DerivedPointerTable::clear());
    ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}
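
// Phase 4 closure: copies a forwarded object to its precomputed destination and
// re-initializes the mark word of the new copy.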
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = (HeapWord*) p;
      HeapWord* compact_to = (HeapWord*) p->forwardee();
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = oop(compact_to);
      new_obj->init_mark();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl;
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata_to_shared();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};

void ShenandoahMarkCompact::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases,
  // humongous regions are already compacted, and do not require further moves, which alleviates
  // sliding costs. We may consider doing this in parallel in future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->region_number();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), err_msg("Region " SIZE_FORMAT " should be movable", r->region_number()));

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   ShenandoahHeapRegion::region_size_words()*num_regions);

      oop new_obj = oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata_to_shared();
        }
      }
    }
  }
}

// This is slightly different from ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  GCTraceTime time("Phase 4: Move objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}