/*
 * Copyright (c) 2014, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/javaClasses.inline.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/workgroup.hpp"

// Barrier set installed for the duration of the Full GC pause (see
// do_mark_compact, which swaps it in around the "Pause Full" scope and
// restores the old barrier set afterwards). Its read_barrier is the
// identity function: during a stop-the-world sliding compaction there is
// no from-space copy to resolve, so no forwarding through the Brooks
// pointer is needed on reads.
class ShenandoahMarkCompactBarrierSet : public ShenandoahBarrierSet {
public:
  ShenandoahMarkCompactBarrierSet(ShenandoahHeap* heap) : ShenandoahBarrierSet(heap) {
  }
  // Identity read barrier: objects are not forwarded on read during Full GC.
  oop read_barrier(oop src) {
    return src;
  }
#ifdef ASSERT
  // Debug-only sanity check: an oop is "safe" when it is NULL or equal to
  // its read-barrier resolution (trivially true here, since read_barrier is
  // the identity; this keeps the ASSERT-mode contract of the base class).
  bool is_safe(oop o) {
    if (o == NULL) return true;
    if (! oopDesc::unsafe_equals(o, read_barrier(o))) {
      return false;
    }
    return true;
  }
  bool is_safe(narrowOop o) {
    oop obj = oopDesc::decode_heap_oop(o);
    return is_safe(obj);
  }
#endif
};

// Resets per-region marking state before Full GC marking: moves the
// next-top-at-mark-start up to the region's current top, clears the
// region's live data counter, and resets the concurrent-iteration safe
// limit. Returns false so iteration continues over all regions.
class ShenandoahClearRegionStatusClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;

public:
  ShenandoahClearRegionStatusClosure() : _heap(ShenandoahHeap::heap()) {}

  bool heap_region_do(ShenandoahHeapRegion *r) {
    _heap->set_next_top_at_mark_start(r->bottom(), r->top());
    r->clear_live_data();
    r->set_concurrent_iteration_safe_limit(r->top());
    return false;
  }
};

// Forces every region into an "active" state so the sliding compaction can
// move data through it: trash regions are recycled, empty regions are made
// regular (bypassing the usual state machine transitions).
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  bool heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_empty()) {
      r->make_regular_bypass();
    }
    assert (r->is_active(), "only active regions in heap now");
    return false;
  }
};

// Shared STW timer for all Full GC pauses; allocated once in initialize().
STWGCTimer* ShenandoahMarkCompact::_gc_timer = NULL;

void ShenandoahMarkCompact::initialize() {
  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
}

// Entry point for the stop-the-world Full GC. Runs the classic four-phase
// sliding mark-compact: (1) mark live objects, (2) compute target addresses
// (stored in the Brooks forwarding pointers), (3) adjust all references to
// point at the target addresses, (4) slide objects into place and rebuild
// region/free-set state. Must be called at a safepoint by the VM thread.
void ShenandoahMarkCompact::do_mark_compact(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Default, use number of parallel GC threads
  WorkGang* workers = heap->workers();
  uint nworkers = ShenandoahWorkerPolicy::calc_workers_for_fullgc();
  ShenandoahWorkerScope full_gc_worker_scope(workers, nworkers);

  {
    ShenandoahGCSession session(/* is_full_gc */true);
    heap->set_full_gc_in_progress(true);

    assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
    assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

    {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
      heap->pre_full_gc_dump(_gc_timer);
    }

    {
      ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
      // Full GC is supposed to recover from any GC state:

      // a. Cancel concurrent mark, if in progress
      if (heap->concurrent_mark_in_progress()) {
        heap->concurrentMark()->cancel();
        heap->stop_concurrent_marking();
      }
      assert(!heap->concurrent_mark_in_progress(), "sanity");

      // b. Cancel evacuation, if in progress
      if (heap->is_evacuation_in_progress()) {
        heap->set_evacuation_in_progress_at_safepoint(false);
      }
      assert(!heap->is_evacuation_in_progress(), "sanity");

      // c. Reset the bitmaps for new marking
      heap->reset_next_mark_bitmap(heap->workers());
      assert(heap->is_next_bitmap_clear(), "sanity");

      // d. Abandon reference discovery and clear all discovered references.
      ReferenceProcessor* rp = heap->ref_processor();
      rp->disable_discovery();
      rp->abandon_partial_discovery();
      rp->verify_no_references_recorded();

      // e. Verify heap before changing the regions
      if (ShenandoahVerify) {
        // Full GC should only be called between regular concurrent cycles, therefore
        // those verifications should be valid.
        heap->verifier()->verify_before_fullgc();
      }

      {
        ShenandoahHeapLocker lock(heap->lock());

        // f. Make sure all regions are active. This is needed because we are potentially
        // sliding the data through them
        ShenandoahEnsureHeapActiveClosure ecl;
        heap->heap_region_iterate(&ecl, false, false);

        // g. Clear region statuses, including collection set status
        ShenandoahClearRegionStatusClosure cl;
        heap->heap_region_iterate(&cl, false, false);
      }
    }

    // Swap in the Full-GC barrier set (identity read barrier) for the
    // duration of the pause; the previous barrier set is restored below.
    BarrierSet* old_bs = oopDesc::bs();
    ShenandoahMarkCompactBarrierSet bs(heap);
    oopDesc::set_bs(&bs);

    {
      GCTraceTime(Info, gc) time("Pause Full", _gc_timer, gc_cause, true);

      if (UseTLAB) {
        // Retire TLABs so the heap is fully parsable for object iteration.
        heap->ensure_parsability(true);
      }

      CodeCache::gc_prologue();

      // We should save the marks of the currently locked biased monitors.
      // The marking doesn't preserve the marks of biased objects.
      //BiasedLocking::preserve_marks();

      heap->set_need_update_refs(true);

      // Setup workers for phase 1
      {
        OrderAccess::fence();

        ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
        phase1_mark_heap();
      }

      // Setup workers for the rest
      {
        OrderAccess::fence();

        // One region queue per worker; filled in phase 2, consumed in
        // phase 4, freed right after.
        ShenandoahHeapRegionSet** copy_queues = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);

        {
          ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
          phase2_calculate_target_addresses(copy_queues);
        }

        OrderAccess::fence();

        {
          ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);
          phase3_update_references();
        }

        // String dedup table entries point into the heap; update or unlink
        // them before objects actually move in phase 4.
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahGCPhase update_str_dedup_table(ShenandoahPhaseTimings::full_gc_update_str_dedup_table);
          ShenandoahStringDedup::parallel_full_gc_update_or_unlink();
        }


        {
          ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
          phase4_compact_objects(copy_queues);
        }

        FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, copy_queues);

        CodeCache::gc_epilogue();
        JvmtiExport::gc_epilogue();
      }

      // refs processing: clean slate
      // rp.enqueue_discovered_references();

      if (ShenandoahVerify) {
        heap->verifier()->verify_after_fullgc();
      }

      heap->set_bytes_allocated_since_cm(0);

      heap->set_need_update_refs(false);

      heap->set_full_gc_in_progress(false);
    }

    {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
      heap->post_full_gc_dump(_gc_timer);
    }

    if (UseTLAB) {
      ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
      heap->resize_all_tlabs();
    }

    // Restore the barrier set that was active before the pause.
    oopDesc::set_bs(old_bs);
  }


  if (UseShenandoahMatrix && PrintShenandoahMatrix) {
    LogTarget(Info, gc) lt;
    LogStream ls(lt);
    heap->connection_matrix()->print_on(&ls);
  }
}

// Phase 1: mark all live objects from roots, using the concurrent-mark
// machinery in stop-the-world mode, then swap mark bitmaps so the fresh
// marking becomes the "complete" bitmap consumed by phases 2-4.
void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahHeap* _heap = ShenandoahHeap::heap();

  ShenandoahConcurrentMark* cm = _heap->concurrentMark();

  // Do not trust heuristics, because this can be our last resort collection.
  // Only ignore processing references and class unloading if explicitly disabled.
  cm->set_process_references(ShenandoahRefProcFrequency != 0);
  cm->set_unload_classes(ShenandoahUnloadClassesFrequency != 0);

  ReferenceProcessor* rp = _heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
  rp->set_active_mt_degree(_heap->workers()->active_workers());

  cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
  cm->shared_finish_mark_from_roots(/* full_gc = */ true);

  // Make the just-finished "next" marking the "complete" marking that the
  // subsequent phases iterate over.
  _heap->swap_mark_bitmaps();

  if (UseShenandoahMatrix && PrintShenandoahMatrix) {
    LogTarget(Info, gc) lt;
    LogStream ls(lt);
    _heap->connection_matrix()->print_on(&ls);
  }

  if (VerifyDuringGC) {
    HandleMark hm; // handle scope
    _heap->prepare_for_verify();
    // Note: we can verify only the heap here. When an object is
    // marked, the previous value of the mark word (including
    // identity hash values, ages, etc) is preserved, and the mark
    // word is set to markOop::marked_value - effectively removing
    // any hash values from the mark word. These hash values are
    // used when verifying the dictionaries and so removing them
    // from the mark word can make verification of the dictionaries
    // fail. At the end of the GC, the original mark word values
    // (including hash values) are restored to the appropriate
    // objects.
    _heap->verify(VerifyOption_G1UseMarkWord);
  }
}

// Trashes humongous regions whose (single) object is not marked live.
// Only the humongous-start region is inspected; trash_humongous_region_at
// presumably also handles its continuation regions -- confirm in
// ShenandoahHeap.
class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* _heap;
public:
  ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {
  }

  bool heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_humongous_start()) {
      // The humongous object starts one Brooks-pointer word past bottom.
      oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
      if (! _heap->is_marked_complete(humongous_obj)) {
        _heap->trash_humongous_region_at(r);
      }
    }
    return false;
  }
};


// Phase-2 object closure: computes the compaction target address for each
// live object in the current from-region and records it in the object's
// Brooks forwarding pointer. Objects are packed bump-pointer style into a
// sequence of to-regions taken from _to_regions (falling back to the
// current from-region itself when the queue is exhausted -- sliding
// compaction guarantees the from-region can hold its own survivors).
class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {

private:

  ShenandoahHeap* _heap;
  ShenandoahHeapRegionSet* _to_regions;   // candidate target regions
  ShenandoahHeapRegion* _to_region;       // region currently being filled
  ShenandoahHeapRegion* _from_region;     // region currently being scanned
  HeapWord* _compact_point;               // bump pointer within _to_region

public:

  ShenandoahPrepareForCompactionObjectClosure(ShenandoahHeapRegionSet* to_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _to_regions(to_regions),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {
  }

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  ShenandoahHeapRegion* to_region() const {
    return _to_region;
  }
  HeapWord* compact_point() const {
    return _compact_point;
  }
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->is_marked_complete(p), "must be marked");
    assert(! _heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
    // Each relocated object needs room for its Brooks pointer word as well.
    size_t obj_size = p->size() + BrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      // Object doesn't fit. Pick next to-region and start compacting there.
      _to_region->set_new_top(_compact_point);
      ShenandoahHeapRegion* new_to_region = _to_regions->current();
      _to_regions->next();
      if (new_to_region == NULL) {
        // Out of queued to-regions: compact into the from-region itself.
        new_to_region = _from_region;
      }
      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    // Stash the target address in the Brooks pointer; phases 3 and 4 read
    // it back via BrooksPointer::get_raw().
    BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size());
    _compact_point += obj_size;
  }
};

// Phase-2 parallel task: workers claim from-regions, compute target
// addresses for their live objects, and record the processed regions in a
// per-worker copy queue (consumed in order by phase 4).
class ShenandoahPrepareForCompactionTask : public AbstractGangTask {
private:

  ShenandoahHeapRegionSet** _copy_queues;
  ShenandoahHeapRegionSet* _from_regions;

  // Claims the next regular/cset region, skipping all others, and appends
  // it to this worker's copy queue. Returns NULL when no regions are left.
  ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* copy_queue) {
    ShenandoahHeapRegion* from_region = _from_regions->claim_next();

    // Note: During Full GC after cancelled conc GC, we might have incoming regions
    // in the collection set. Otherwise we would have just taken care of regular regions.
    while (from_region != NULL && !(from_region->is_regular() || from_region->is_cset())) {
      from_region = _from_regions->claim_next();
    }
    if (from_region != NULL) {
      assert(copy_queue != NULL, "sanity");
      assert(from_region->is_regular() || from_region->is_cset(), "only regular/cset regions in mark-compact");
      copy_queue->add_region(from_region);
    }
    return from_region;
  }

public:
  ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet* from_regions, ShenandoahHeapRegionSet** copy_queues) :
    AbstractGangTask("Shenandoah Prepare For Compaction Task"),
    _from_regions(from_regions), _copy_queues(copy_queues) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* copy_queue = _copy_queues[worker_id];
    ShenandoahHeapRegion* from_region = next_from_region(copy_queue);
    if (from_region == NULL) return;
    // Local scratch set of candidate to-regions; the first from-region also
    // serves as the first to-region.
    ShenandoahHeapRegionSet* to_regions = new ShenandoahHeapRegionSet(ShenandoahHeap::heap()->num_regions());
    ShenandoahPrepareForCompactionObjectClosure cl(to_regions, from_region);
    while (from_region != NULL) {
      assert(from_region != NULL, "sanity");
      cl.set_from_region(from_region);
      heap->marked_object_iterate(from_region, &cl);
      // A fully-evacuated from-region becomes a candidate to-region for
      // later objects (unless the closure is still filling it).
      if (from_region != cl.to_region()) {
        assert(from_region != NULL, "sanity");
        to_regions->add_region(from_region);
      }
      from_region = next_from_region(copy_queue);
    }
    assert(cl.to_region() != NULL, "should not happen");
    cl.to_region()->set_new_top(cl.compact_point());
    // Any to-regions left unused will be completely emptied by the
    // compaction: mark their new top as bottom.
    while (to_regions->count() > 0) {
      ShenandoahHeapRegion* r = to_regions->current();
      to_regions->next();
      assert(r != NULL, "should not happen");
      r->set_new_top(r->bottom());
    }
    delete to_regions;
  }
};

// Phase 2: reclaim dead humongous regions, then compute the post-compaction
// address of every live object (recorded in its Brooks pointer) and fill
// the per-worker copy queues used by phase 4.
void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  {
    ShenandoahHeapLocker lock(heap->lock());

    ShenandoahMCReclaimHumongousRegionClosure cl;
    heap->heap_region_iterate(&cl);

    // After some humongous regions were reclaimed, we need to ensure their
    // backing storage is active. This is needed because we are potentially
    // sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl, false, false);
  }

  // Initialize copy queues.
  for (uint i = 0; i < heap->max_workers(); i++) {
    copy_queues[i] = new ShenandoahHeapRegionSet(heap->num_regions());
  }

  ShenandoahHeapRegionSet* from_regions = heap->regions();
  from_regions->clear_current_index();
  ShenandoahPrepareForCompactionTask prepare_task(from_regions, copy_queues);
  heap->workers()->run_task(&prepare_task);
}

// Phase-3 oop closure: rewrites every reference field to the referent's
// post-compaction address, read from the referent's Brooks pointer. When
// the connection matrix is enabled, it also re-records the connection
// using the *new* address of the referencing object, derived by shifting
// the field address by _new_obj_offset (set per-object by the object
// closure below).
class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;
  size_t _new_obj_offset;   // (old address - new address) of the current holder, in HeapWords
public:

  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _new_obj_offset(SIZE_MAX) {    // sentinel: must be set before heap fields are visited
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_heap->is_marked_complete(obj), "must be marked");
      // The referent's new location was stored in its Brooks pointer by phase 2.
      oop forw = oop(BrooksPointer::get_raw(obj));
      oopDesc::encode_store_heap_oop(p, forw);
      if (UseShenandoahMatrix) {
        if (_heap->is_in_reserved(p)) {
          assert(_heap->is_in_reserved(forw), "must be in heap");
          assert (_new_obj_offset != SIZE_MAX, "should be set");
          // We're moving a to a', which points to b, about to be moved to b'.
          // We already know b' from the fwd pointer of b.
          // In the object closure, we see a, and we know a' (by looking at its
          // fwd ptr). We store the offset in the OopClosure, which is going
          // to visit all of a's fields, and then, when we see each field, we
          // subtract the offset from each field address to get the final ptr.
          _heap->connection_matrix()->set_connected(((HeapWord*) p) - _new_obj_offset, forw);
        }
      }
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
  void set_new_obj_offset(size_t new_obj_offset) {
    _new_obj_offset = new_obj_offset;
  }
};

// Phase-3 object closure: for each live object, computes its own
// old-to-new offset and then iterates its fields with the oop closure
// above.
class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahAdjustPointersClosure _cl;
  ShenandoahHeap* _heap;
public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    HeapWord* forw = BrooksPointer::get_raw(p);
    _cl.set_new_obj_offset(pointer_delta((HeapWord*) p, forw));
    p->oop_iterate(&_cl);
  }
};

// Phase-3 parallel task over heap regions: adjust all reference fields of
// live objects. Humongous continuation regions are skipped -- their
// content is covered when the humongous-start region's object is iterated.
class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeapRegionSet* _regions;
public:

  ShenandoahAdjustPointersTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _regions(regions) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegion* r = _regions->claim_next();
    ShenandoahAdjustPointersObjectClosure obj_cl;
    while (r != NULL) {
      if (! r->is_humongous_continuation()) {
        heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions->claim_next();
    }
  }
};

// Phase-3 parallel task over all roots (strong and weak): rewrites root
// references, CLDs, and code-blob embedded oops to the new addresses.
class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:

  ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    CLDToOopClosure adjust_cld_closure(&cl, true);
    MarkingCodeBlobClosure adjust_code_closure(&cl,
                                             CodeBlobToOopClosure::FixRelocations);

    _rp->process_all_roots(&cl, &cl,
                           &adjust_cld_closure,
                           &adjust_code_closure, worker_id);
  }
};

// Phase 3: rewrite all references -- roots first (with derived-pointer
// bookkeeping for compiled code), then all heap fields -- to the target
// addresses computed in phase 2.
void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (UseShenandoahMatrix) {
    // The matrix is rebuilt from scratch while adjusting pointers.
    heap->connection_matrix()->clear_all();
  }

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
  {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
    ShenandoahAdjustRootPointersTask task(&rp);
    workers->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahHeapRegionSet* regions = heap->regions();
  regions->clear_current_index();
  ShenandoahAdjustPointersTask adjust_pointers_task(regions);
  workers->run_task(&adjust_pointers_task);
}

// Phase-4 object closure: physically slides each live object to the target
// address stored in its Brooks pointer, re-initializes the forwarding
// pointer at the new location, and feeds surviving Strings to the dedup
// queue when enabled.
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  bool _str_dedup;
  uint _worker_id;
public:
  ShenandoahCompactObjectsClosure(uint worker_id) : _heap(ShenandoahHeap::heap()),
    _str_dedup(ShenandoahStringDedup::is_enabled()), _worker_id(worker_id) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    size_t size = (size_t)p->size();
    HeapWord* compact_to = BrooksPointer::get_raw(p);
    HeapWord* compact_from = (HeapWord*) p;
    if (compact_from != compact_to) {
      // Sliding compaction: source and target may overlap, conjoint copy
      // handles that.
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
    }
    oop new_obj = oop(compact_to);
    // new_obj->init_mark();
    BrooksPointer::initialize(new_obj);

    // String Dedup support
    if(_str_dedup && java_lang_String::is_instance_inlined(new_obj)) {
      new_obj->incr_age();
      if (ShenandoahStringDedup::is_candidate(new_obj)) {
        ShenandoahStringDedup::enqueue_from_safepoint(new_obj, _worker_id);
      }
    }
  }
};

// Phase-4 parallel task: each worker replays its own copy queue (the exact
// region order from phase 2), sliding objects and updating region tops.
// Processing each worker's regions in the same order guarantees targets are
// vacated before they are written.
class ShenandoahCompactObjectsTask : public AbstractGangTask {
  ShenandoahHeapRegionSet** _regions;
public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** regions) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _regions(regions) {
  }
  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* copy_queue = _regions[worker_id];
    copy_queue->clear_current_index();
    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = copy_queue->current();
    copy_queue->next();
    while (r != NULL) {
      assert(! r->is_humongous(), "must not get humongous regions here");
      heap->marked_object_iterate(r, &cl);
      r->set_top(r->new_top());
      r = copy_queue->current();
      copy_queue->next();
    }
  }
};

// Post-compaction region fixup: resets mark-start pointers, normalizes
// leftover cset regions, trashes/recycles emptied regions, rebuilds the
// free set, and accumulates total live bytes (read back via get_live() to
// set heap usage).
class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
  size_t _live;
  ShenandoahHeap* _heap;
public:

  ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) {
    _heap->clear_free_regions();
  }

  bool heap_region_do(ShenandoahHeapRegion* r) {
    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom());

    size_t live = r->used();

    // Turn any lingering non-empty cset regions into regular regions.
    // This must be the leftover from the cancelled concurrent GC.
    if (r->is_cset() && live != 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular/cset regions that became empty
    if ((r->is_regular() || r->is_cset()) && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    // Finally, add all suitable regions into the free set
    if (r->is_alloc_allowed()) {
      if (_heap->collection_set()->is_in(r)) {
        _heap->collection_set()->remove_region(r);
      }
      _heap->add_free_region(r);
    }

    r->set_live_data(live);
    r->reset_alloc_stats_to_shared();
    _live += live;
    return false;
  }

  size_t get_live() { return _live; }

};

// Phase 4: slide all live objects into place (parallel, per-worker copy
// queues), then rebuild region states, free set, heap usage accounting, and
// reset bitmaps in preparation for the next cycle.
void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCompactObjectsTask compact_task(copy_queues);
  heap->workers()->run_task(&compact_task);

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  heap->reset_complete_mark_bitmap(heap->workers());

  {
    ShenandoahHeapLocker lock(heap->lock());
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);

    heap->set_used(post_compact.get_live());
  }

  heap->collection_set()->clear();
  heap->clear_cancelled_concgc();

  // Also clear the next bitmap in preparation for next marking.
  heap->reset_next_mark_bitmap(heap->workers());

  for (uint i = 0; i < heap->max_workers(); i++) {
    delete copy_queues[i];
  }

}