/*
 * Copyright (c) 2014, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
21 * 22 */ 23 24 #include "code/codeCache.hpp" 25 #include "gc/shared/gcTraceTime.inline.hpp" 26 #include "gc/shared/isGCActiveMark.hpp" 27 #include "gc/shenandoah/brooksPointer.hpp" 28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 29 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp" 30 #include "gc/shenandoah/shenandoahMarkCompact.hpp" 31 #include "gc/shenandoah/shenandoahBarrierSet.hpp" 32 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" 33 #include "gc/shenandoah/shenandoahHeap.hpp" 34 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 35 #include "gc/shenandoah/shenandoahRootProcessor.hpp" 36 #include "gc/shenandoah/vm_operations_shenandoah.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "runtime/biasedLocking.hpp" 39 #include "runtime/thread.hpp" 40 #include "utilities/copy.hpp" 41 #include "gc/shared/taskqueue.inline.hpp" 42 #include "gc/shared/workgroup.hpp" 43 44 class ShenandoahMarkCompactBarrierSet : public ShenandoahBarrierSet { 45 public: 46 ShenandoahMarkCompactBarrierSet(ShenandoahHeap* heap) : ShenandoahBarrierSet(heap) { 47 } 48 oop read_barrier(oop src) { 49 return src; 50 } 51 #ifdef ASSERT 52 bool is_safe(oop o) { 53 if (o == NULL) return true; 54 if (! 
oopDesc::unsafe_equals(o, read_barrier(o))) { 55 return false; 56 } 57 return true; 58 } 59 bool is_safe(narrowOop o) { 60 oop obj = oopDesc::decode_heap_oop(o); 61 return is_safe(obj); 62 } 63 #endif 64 }; 65 66 class ClearInCollectionSetHeapRegionClosure: public ShenandoahHeapRegionClosure { 67 private: 68 ShenandoahHeap* _heap; 69 public: 70 71 ClearInCollectionSetHeapRegionClosure() : _heap(ShenandoahHeap::heap()) { 72 } 73 74 bool doHeapRegion(ShenandoahHeapRegion* r) { 75 _heap->set_next_top_at_mark_start(r->bottom(), r->top()); 76 r->clear_live_data(); 77 r->set_concurrent_iteration_safe_limit(r->top()); 78 return false; 79 } 80 }; 81 82 STWGCTimer* ShenandoahMarkCompact::_gc_timer = NULL; 83 84 void ShenandoahMarkCompact::initialize() { 85 _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer(); 86 } 87 88 void ShenandoahMarkCompact::do_mark_compact(GCCause::Cause gc_cause) { 89 90 ShenandoahHeap* _heap = ShenandoahHeap::heap(); 91 ShenandoahCollectorPolicy* policy = _heap->shenandoahPolicy(); 92 93 _gc_timer->register_gc_start(); 94 95 _heap->set_full_gc_in_progress(true); 96 97 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); 98 IsGCActiveMark is_active; 99 100 assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped"); 101 102 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc); 103 104 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps); 105 _heap->pre_full_gc_dump(_gc_timer); 106 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps); 107 108 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_prepare); 109 110 // Full GC is supposed to recover from any GC state: 111 112 // a. Cancel concurrent mark, if in progress 113 if (_heap->concurrent_mark_in_progress()) { 114 _heap->concurrentMark()->cancel(); 115 _heap->stop_concurrent_marking(); 116 } 117 assert(!_heap->concurrent_mark_in_progress(), "sanity"); 118 119 // b. 
Cancel evacuation, if in progress 120 if (_heap->is_evacuation_in_progress()) { 121 _heap->set_evacuation_in_progress_at_safepoint(false); 122 } 123 assert(!_heap->is_evacuation_in_progress(), "sanity"); 124 125 // c. Reset the bitmaps for new marking 126 _heap->reset_next_mark_bitmap(_heap->workers()); 127 assert(_heap->is_next_bitmap_clear(), "sanity"); 128 129 ClearInCollectionSetHeapRegionClosure cl; 130 _heap->heap_region_iterate(&cl, false, false); 131 132 /* 133 if (ShenandoahVerify) { 134 // Full GC should only be called between regular concurrent cycles, therefore 135 // those verifications should be valid. 136 _heap->verify_heap_after_evacuation(); 137 _heap->verify_heap_after_update_refs(); 138 } 139 */ 140 141 BarrierSet* old_bs = oopDesc::bs(); 142 ShenandoahMarkCompactBarrierSet bs(_heap); 143 oopDesc::set_bs(&bs); 144 145 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_prepare); 146 147 { 148 GCTraceTime(Info, gc) time("Pause Full", _gc_timer, gc_cause, true); 149 150 if (UseTLAB) { 151 _heap->ensure_parsability(true); 152 } 153 154 CodeCache::gc_prologue(); 155 156 // We should save the marks of the currently locked biased monitors. 157 // The marking doesn't preserve the marks of biased objects. 
158 //BiasedLocking::preserve_marks(); 159 160 _heap->set_need_update_refs(true); 161 WorkGang* workers = _heap->workers(); 162 163 // Setup workers for phase 1 164 { 165 uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_init_marking( 166 workers->active_workers(), Threads::number_of_non_daemon_threads()); 167 workers->update_active_workers(nworkers); 168 ShenandoahWorkerScope scope(workers, nworkers); 169 170 OrderAccess::fence(); 171 172 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_mark); 173 phase1_mark_heap(); 174 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_mark); 175 } 176 177 // Setup workers for the rest 178 { 179 uint nworkers = ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation( 180 workers->active_workers(), Threads::number_of_non_daemon_threads()); 181 182 ShenandoahWorkerScope scope(workers, nworkers); 183 184 OrderAccess::fence(); 185 186 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_calculate_addresses); 187 ShenandoahHeapRegionSet* copy_queues[_heap->max_workers()]; 188 phase2_calculate_target_addresses(copy_queues); 189 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_calculate_addresses); 190 191 OrderAccess::fence(); 192 193 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_adjust_pointers); 194 phase3_update_references(); 195 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_adjust_pointers); 196 197 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_copy_objects); 198 phase4_compact_objects(copy_queues); 199 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_copy_objects); 200 201 CodeCache::gc_epilogue(); 202 JvmtiExport::gc_epilogue(); 203 } 204 205 // refs processing: clean slate 206 // rp.enqueue_discovered_references(); 207 208 if (ShenandoahVerify) { 209 _heap->verify_heap_after_evacuation(); 210 } 211 212 _heap->set_bytes_allocated_since_cm(0); 213 214 _heap->set_need_update_refs(false); 215 216 
_heap->set_full_gc_in_progress(false); 217 } 218 219 _gc_timer->register_gc_end(); 220 221 policy->record_full_gc(); 222 223 policy->record_phase_start(ShenandoahCollectorPolicy::full_gc_heapdumps); 224 _heap->post_full_gc_dump(_gc_timer); 225 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc_heapdumps); 226 227 policy->record_phase_end(ShenandoahCollectorPolicy::full_gc); 228 229 oopDesc::set_bs(old_bs); 230 } 231 232 #ifdef ASSERT 233 class VerifyNotForwardedPointersClosure : public MetadataAwareOopClosure { 234 private: 235 template <class T> 236 inline void do_oop_work(T* p) { 237 T o = oopDesc::load_heap_oop(p); 238 if (! oopDesc::is_null(o)) { 239 oop obj = oopDesc::decode_heap_oop_not_null(o); 240 assert(oopDesc::unsafe_equals(obj, ShenandoahBarrierSet::resolve_oop_static_not_null(obj)), 241 "expect forwarded oop"); 242 ShenandoahHeap* heap = ShenandoahHeap::heap(); 243 if (! heap->is_marked_complete(obj)) { 244 tty->print_cr("ref region humongous? %s", BOOL_TO_STR(heap->heap_region_containing(p)->is_humongous())); 245 } 246 assert(heap->is_marked_complete(obj), "must be marked"); 247 assert(! heap->allocated_after_complete_mark_start((HeapWord*) obj), "must be truly marked"); 248 } 249 } 250 public: 251 void do_oop(oop* p) { 252 do_oop_work(p); 253 } 254 void do_oop(narrowOop* p) { 255 do_oop_work(p); 256 } 257 }; 258 259 class ShenandoahMCVerifyAfterMarkingObjectClosure : public ObjectClosure { 260 public: 261 void do_object(oop p) { 262 ShenandoahHeap* heap = ShenandoahHeap::heap(); 263 assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)), 264 "expect forwarded oop"); 265 assert(heap->is_marked_complete(p), "must be marked"); 266 assert(! 
heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked"); 267 VerifyNotForwardedPointersClosure cl; 268 p->oop_iterate(&cl); 269 } 270 }; 271 272 class ShenandoahMCVerifyAfterMarkingRegionClosure : public ShenandoahHeapRegionClosure { 273 bool doHeapRegion(ShenandoahHeapRegion* r) { 274 ShenandoahMCVerifyAfterMarkingObjectClosure cl; 275 if (! r->is_humongous_continuation()) { 276 ShenandoahHeap::heap()->marked_object_iterate(r, &cl); 277 } 278 return false; 279 } 280 }; 281 282 #endif 283 284 void ShenandoahMarkCompact::phase1_mark_heap() { 285 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer); 286 ShenandoahHeap* _heap = ShenandoahHeap::heap(); 287 288 ShenandoahConcurrentMark* cm = _heap->concurrentMark(); 289 290 cm->set_process_references(true); 291 cm->set_unload_classes(true); 292 293 ReferenceProcessor* rp = _heap->ref_processor(); 294 // enable ("weak") refs discovery 295 rp->enable_discovery(true /*verify_no_refs*/); 296 rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle 297 rp->set_active_mt_degree(_heap->workers()->active_workers()); 298 299 COMPILER2_PRESENT(DerivedPointerTable::clear()); 300 cm->update_roots(); 301 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 302 303 cm->mark_roots(); 304 cm->shared_finish_mark_from_roots(/* full_gc = */ true); 305 306 _heap->swap_mark_bitmaps(); 307 308 if (UseShenandoahMatrix) { 309 if (PrintShenandoahMatrix) { 310 outputStream* log = Log(gc)::info_stream(); 311 _heap->connection_matrix()->print_on(log); 312 } 313 if (VerifyShenandoahMatrix) { 314 _heap->verify_matrix(); 315 } 316 } 317 318 if (VerifyDuringGC) { 319 HandleMark hm; // handle scope 320 // Universe::heap()->prepare_for_verify(); 321 _heap->prepare_for_verify(); 322 // Note: we can verify only the heap here. 
When an object is 323 // marked, the previous value of the mark word (including 324 // identity hash values, ages, etc) is preserved, and the mark 325 // word is set to markOop::marked_value - effectively removing 326 // any hash values from the mark word. These hash values are 327 // used when verifying the dictionaries and so removing them 328 // from the mark word can make verification of the dictionaries 329 // fail. At the end of the GC, the original mark word values 330 // (including hash values) are restored to the appropriate 331 // objects. 332 // Universe::heap()->verify(VerifySilently, VerifyOption_G1UseMarkWord); 333 _heap->verify(VerifyOption_G1UseMarkWord); 334 } 335 336 #ifdef ASSERT 337 ShenandoahMCVerifyAfterMarkingRegionClosure cl; 338 _heap->heap_region_iterate(&cl); 339 #endif 340 } 341 342 class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure { 343 private: 344 ShenandoahHeap* _heap; 345 public: 346 ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) { 347 } 348 349 bool doHeapRegion(ShenandoahHeapRegion* r) { 350 if (r->is_humongous_start()) { 351 oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size()); 352 if (! 
_heap->is_marked_complete(humongous_obj)) { 353 _heap->reclaim_humongous_region_at(r); 354 } 355 } 356 return false; 357 } 358 }; 359 360 361 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { 362 363 private: 364 365 ShenandoahHeap* _heap; 366 ShenandoahHeapRegionSet* _to_regions; 367 ShenandoahHeapRegion* _to_region; 368 ShenandoahHeapRegion* _from_region; 369 HeapWord* _compact_point; 370 371 public: 372 373 ShenandoahPrepareForCompactionObjectClosure(ShenandoahHeapRegionSet* to_regions, ShenandoahHeapRegion* to_region) : 374 _heap(ShenandoahHeap::heap()), 375 _to_regions(to_regions), 376 _to_region(to_region), 377 _from_region(NULL), 378 _compact_point(to_region->bottom()) { 379 } 380 381 void set_from_region(ShenandoahHeapRegion* from_region) { 382 _from_region = from_region; 383 } 384 385 ShenandoahHeapRegion* to_region() const { 386 return _to_region; 387 } 388 HeapWord* compact_point() const { 389 return _compact_point; 390 } 391 void do_object(oop p) { 392 assert(_from_region != NULL, "must set before work"); 393 assert(_heap->is_marked_complete(p), "must be marked"); 394 assert(! _heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked"); 395 size_t size = p->size(); 396 size_t obj_size = size + BrooksPointer::word_size(); 397 if (_compact_point + obj_size > _to_region->end()) { 398 // Object doesn't fit. Pick next to-region and start compacting there. 
399 _to_region->set_new_top(_compact_point); 400 ShenandoahHeapRegion* new_to_region = _to_regions->current(); 401 _to_regions->next(); 402 if (new_to_region == NULL) { 403 new_to_region = _from_region; 404 } 405 assert(new_to_region != _to_region, "must not reuse same to-region"); 406 assert(new_to_region != NULL, "must not be NULL"); 407 _to_region = new_to_region; 408 _compact_point = _to_region->bottom(); 409 } 410 assert(_compact_point + obj_size <= _to_region->end(), "must fit"); 411 assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)), 412 "expect forwarded oop"); 413 BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size()); 414 _compact_point += obj_size; 415 } 416 }; 417 418 class ShenandoahPrepareForCompactionTask : public AbstractGangTask { 419 private: 420 421 ShenandoahHeapRegionSet** _copy_queues; 422 ShenandoahHeapRegionSet* _from_regions; 423 424 ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* copy_queue) { 425 ShenandoahHeapRegion* from_region = _from_regions->claim_next(); 426 while (from_region != NULL && (from_region->is_humongous() || from_region->is_pinned())) { 427 from_region = _from_regions->claim_next(); 428 } 429 if (from_region != NULL) { 430 assert(copy_queue != NULL, "sanity"); 431 assert(! from_region->is_humongous(), "must not get humongous regions here"); 432 assert(! 
from_region->is_pinned(), "no pinned region in mark-compact"); 433 copy_queue->add_region(from_region); 434 } 435 return from_region; 436 } 437 438 public: 439 ShenandoahPrepareForCompactionTask(ShenandoahHeapRegionSet* from_regions, ShenandoahHeapRegionSet** copy_queues) : 440 AbstractGangTask("Shenandoah Prepare For Compaction Task"), 441 _from_regions(from_regions), _copy_queues(copy_queues) { 442 } 443 444 void work(uint worker_id) { 445 ShenandoahHeap* heap = ShenandoahHeap::heap(); 446 ShenandoahHeapRegionSet* copy_queue = _copy_queues[worker_id]; 447 ShenandoahHeapRegion* from_region = next_from_region(copy_queue); 448 if (from_region == NULL) return; 449 ShenandoahHeapRegionSet* to_regions = new ShenandoahHeapRegionSet(ShenandoahHeap::heap()->max_regions()); 450 ShenandoahPrepareForCompactionObjectClosure cl(to_regions, from_region); 451 while (from_region != NULL) { 452 assert(from_region != NULL, "sanity"); 453 cl.set_from_region(from_region); 454 heap->marked_object_iterate(from_region, &cl); 455 if (from_region != cl.to_region()) { 456 assert(from_region != NULL, "sanity"); 457 to_regions->add_region(from_region); 458 } 459 from_region = next_from_region(copy_queue); 460 } 461 assert(cl.to_region() != NULL, "should not happen"); 462 cl.to_region()->set_new_top(cl.compact_point()); 463 while (to_regions->count() > 0) { 464 ShenandoahHeapRegion* r = to_regions->current(); 465 to_regions->next(); 466 if (r == NULL) { 467 to_regions->print(); 468 } 469 assert(r != NULL, "should not happen"); 470 r->set_new_top(r->bottom()); 471 } 472 delete to_regions; 473 } 474 }; 475 476 void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** copy_queues) { 477 GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer); 478 ShenandoahHeap* heap = ShenandoahHeap::heap(); 479 480 ShenandoahMCReclaimHumongousRegionClosure cl; 481 heap->heap_region_iterate(&cl); 482 483 // Initialize copy queues. 
484 for (uint i = 0; i < heap->max_workers(); i++) { 485 copy_queues[i] = new ShenandoahHeapRegionSet(heap->max_regions()); 486 } 487 488 ShenandoahHeapRegionSet* from_regions = heap->regions(); 489 from_regions->clear_current_index(); 490 ShenandoahPrepareForCompactionTask prepare_task(from_regions, copy_queues); 491 heap->workers()->run_task(&prepare_task); 492 } 493 494 class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure { 495 private: 496 ShenandoahHeap* _heap; 497 498 public: 499 500 ShenandoahAdjustPointersClosure() : _heap(ShenandoahHeap::heap()) { 501 } 502 503 private: 504 template <class T> 505 inline void do_oop_work(T* p) { 506 T o = oopDesc::load_heap_oop(p); 507 if (! oopDesc::is_null(o)) { 508 oop obj = oopDesc::decode_heap_oop_not_null(o); 509 assert(_heap->is_marked_complete(obj), "must be marked"); 510 oop forw = oop(BrooksPointer::get_raw(obj)); 511 oopDesc::encode_store_heap_oop(p, forw); 512 } 513 } 514 public: 515 void do_oop(oop* p) { 516 do_oop_work(p); 517 } 518 void do_oop(narrowOop* p) { 519 do_oop_work(p); 520 } 521 }; 522 523 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure { 524 private: 525 ShenandoahAdjustPointersClosure* _cl; 526 ShenandoahHeap* _heap; 527 public: 528 ShenandoahAdjustPointersObjectClosure(ShenandoahAdjustPointersClosure* cl) : 529 _cl(cl), _heap(ShenandoahHeap::heap()) { 530 } 531 void do_object(oop p) { 532 assert(_heap->is_marked_complete(p), "must be marked"); 533 p->oop_iterate(_cl); 534 } 535 }; 536 537 class ShenandoahAdjustPointersTask : public AbstractGangTask { 538 private: 539 ShenandoahHeapRegionSet* _regions; 540 public: 541 542 ShenandoahAdjustPointersTask(ShenandoahHeapRegionSet* regions) : 543 AbstractGangTask("Shenandoah Adjust Pointers Task"), 544 _regions(regions) { 545 } 546 547 void work(uint worker_id) { 548 ShenandoahHeap* heap = ShenandoahHeap::heap(); 549 ShenandoahHeapRegion* r = _regions->claim_next(); 550 ShenandoahAdjustPointersClosure cl; 551 
ShenandoahAdjustPointersObjectClosure obj_cl(&cl); 552 while (r != NULL) { 553 if (! r->is_humongous_continuation()) { 554 heap->marked_object_iterate(r, &obj_cl); 555 } 556 r = _regions->claim_next(); 557 } 558 } 559 }; 560 561 class ShenandoahAdjustRootPointersTask : public AbstractGangTask { 562 private: 563 ShenandoahRootProcessor* _rp; 564 565 public: 566 567 ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) : 568 AbstractGangTask("Shenandoah Adjust Root Pointers Task"), 569 _rp(rp) { 570 } 571 572 void work(uint worker_id) { 573 ShenandoahAdjustPointersClosure cl; 574 CLDToOopClosure adjust_cld_closure(&cl, true); 575 MarkingCodeBlobClosure adjust_code_closure(&cl, 576 CodeBlobToOopClosure::FixRelocations); 577 578 _rp->process_all_roots(&cl, &cl, 579 &adjust_cld_closure, 580 &adjust_code_closure, worker_id); 581 } 582 }; 583 584 void ShenandoahMarkCompact::phase3_update_references() { 585 GCTraceTime(Info, gc, phases) time("Phase 2: Adjust pointers", _gc_timer); 586 ShenandoahHeap* heap = ShenandoahHeap::heap(); 587 588 // Need cleared claim bits for the roots processing 589 ClassLoaderDataGraph::clear_claimed_marks(); 590 591 WorkGang* workers = heap->workers(); 592 uint nworkers = workers->active_workers(); 593 { 594 COMPILER2_PRESENT(DerivedPointerTable::clear()); 595 596 ShenandoahRootProcessor rp(heap, nworkers); 597 ShenandoahAdjustRootPointersTask task(&rp); 598 workers->run_task(&task); 599 COMPILER2_PRESENT(DerivedPointerTable::update_pointers()); 600 } 601 602 ShenandoahHeapRegionSet* regions = heap->regions(); 603 regions->clear_current_index(); 604 ShenandoahAdjustPointersTask adjust_pointers_task(regions); 605 workers->run_task(&adjust_pointers_task); 606 } 607 608 class ShenandoahCompactObjectsClosure : public ObjectClosure { 609 private: 610 ShenandoahHeap* _heap; 611 public: 612 ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) { 613 } 614 void do_object(oop p) { 615 assert(_heap->is_marked_complete(p), "must be 
marked"); 616 size_t size = p->size(); 617 HeapWord* compact_to = BrooksPointer::get_raw(p); 618 HeapWord* compact_from = (HeapWord*) p; 619 if (compact_from != compact_to) { 620 Copy::aligned_conjoint_words(compact_from, compact_to, size); 621 } 622 oop new_obj = oop(compact_to); 623 // new_obj->init_mark(); 624 BrooksPointer::initialize(new_obj); 625 } 626 }; 627 628 class ShenandoahCompactObjectsTask : public AbstractGangTask { 629 ShenandoahHeapRegionSet** _regions; 630 public: 631 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** regions) : 632 AbstractGangTask("Shenandoah Compact Objects Task"), 633 _regions(regions) { 634 } 635 void work(uint worker_id) { 636 ShenandoahHeap* heap = ShenandoahHeap::heap(); 637 ShenandoahHeapRegionSet* copy_queue = _regions[worker_id]; 638 copy_queue->clear_current_index(); 639 ShenandoahCompactObjectsClosure cl; 640 ShenandoahHeapRegion* r = copy_queue->current(); 641 copy_queue->next(); 642 while (r != NULL) { 643 assert(! r->is_humongous(), "must not get humongous regions here"); 644 heap->marked_object_iterate(r, &cl); 645 r->set_top(r->new_top()); 646 r = copy_queue->current(); 647 copy_queue->next(); 648 } 649 } 650 }; 651 652 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { 653 size_t _live; 654 ShenandoahHeap* _heap; 655 public: 656 657 ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) { 658 _heap->clear_free_regions(); 659 } 660 661 bool doHeapRegion(ShenandoahHeapRegion* r) { 662 // Need to reset the complete-top-at-mark-start pointer here because 663 // the complete marking bitmap is no longer valid. This ensures 664 // size-based iteration in marked_object_iterate(). 
665 _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom()); 666 r->set_in_collection_set(false); 667 if (r->is_humongous()) { 668 _live += ShenandoahHeapRegion::RegionSizeBytes; 669 } else { 670 size_t live = r->used(); 671 if (live == 0) { 672 r->recycle(); 673 _heap->add_free_region(r); 674 } 675 r->set_live_data(live); 676 _live += live; 677 } 678 return false; 679 } 680 681 size_t get_live() { return _live; } 682 683 }; 684 685 void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** copy_queues) { 686 GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer); 687 ShenandoahHeap* heap = ShenandoahHeap::heap(); 688 ShenandoahCompactObjectsTask compact_task(copy_queues); 689 heap->workers()->run_task(&compact_task); 690 691 heap->clear_cset_fast_test(); 692 693 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer 694 // and must ensure the bitmap is in sync. 695 heap->reset_complete_mark_bitmap(heap->workers()); 696 697 { 698 ShenandoahHeap::ShenandoahHeapLock lock(heap); 699 ShenandoahPostCompactClosure post_compact; 700 heap->heap_region_iterate(&post_compact); 701 702 heap->set_used(post_compact.get_live()); 703 704 } 705 706 heap->clear_cancelled_concgc(); 707 708 // Also clear the next bitmap in preparation for next marking. 709 heap->reset_next_mark_bitmap(heap->workers()); 710 711 for (uint i = 0; i < heap->max_workers(); i++) { 712 delete copy_queues[i]; 713 } 714 715 }