61 if (! oopDesc::unsafe_equals(o, read_barrier(o))) {
62 return false;
63 }
64 return true;
65 }
66 bool is_safe(narrowOop o) {
67 oop obj = oopDesc::decode_heap_oop(o);
68 return is_safe(obj);
69 }
70 #endif
71 };
72
73 class ShenandoahClearRegionStatusClosure: public ShenandoahHeapRegionClosure {
74 private:
75 ShenandoahHeap* _heap;
76
77 public:
78 ShenandoahClearRegionStatusClosure() : _heap(ShenandoahHeap::heap()) {}
79
80 bool heap_region_do(ShenandoahHeapRegion *r) {
81 _heap->set_next_top_at_mark_start(r->bottom(), r->top());
82 r->clear_live_data();
83 r->set_concurrent_iteration_safe_limit(r->top());
84 return false;
85 }
86 };
87
88 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
89 private:
90 ShenandoahHeap* _heap;
91
92 public:
93 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
94 bool heap_region_do(ShenandoahHeapRegion* r) {
95 if (r->is_trash()) {
96 r->recycle();
97 }
98 if (r->is_empty()) {
99 r->make_regular_bypass();
100 }
101 assert (r->is_active(), "only active regions in heap now");
130 }
131
132 {
133 ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
134 // Full GC is supposed to recover from any GC state:
135
136 // a. Cancel concurrent mark, if in progress
137 if (heap->concurrent_mark_in_progress()) {
138 heap->concurrentMark()->cancel();
139 heap->stop_concurrent_marking();
140 }
141 assert(!heap->concurrent_mark_in_progress(), "sanity");
142
143 // b. Cancel evacuation, if in progress
144 if (heap->is_evacuation_in_progress()) {
145 heap->set_evacuation_in_progress_at_safepoint(false);
146 }
147 assert(!heap->is_evacuation_in_progress(), "sanity");
148
149 // c. Reset the bitmaps for new marking
150 heap->reset_next_mark_bitmap(heap->workers());
151 assert(heap->is_next_bitmap_clear(), "sanity");
152
153 // d. Abandon reference discovery and clear all discovered references.
154 ReferenceProcessor* rp = heap->ref_processor();
155 rp->disable_discovery();
156 rp->abandon_partial_discovery();
157 rp->verify_no_references_recorded();
158
159 // e. Verify heap before changing the regions
160 if (ShenandoahVerify) {
161 // Full GC should only be called between regular concurrent cycles, therefore
162 // those verifications should be valid.
163 heap->verifier()->verify_before_fullgc();
164 }
165
166 {
167 ShenandoahHeapLocker lock(heap->lock());
168
169 // f. Make sure all regions are active. This is needed because we are potentially
170 // sliding the data through them
171 ShenandoahEnsureHeapActiveClosure ecl;
278 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
279 ShenandoahHeap* _heap = ShenandoahHeap::heap();
280
281 ShenandoahConcurrentMark* cm = _heap->concurrentMark();
282
283 // Do not trust heuristics, because this can be our last resort collection.
284 // Only ignore processing references and class unloading if explicitly disabled.
285 cm->set_process_references(ShenandoahRefProcFrequency != 0);
286 cm->set_unload_classes(ShenandoahUnloadClassesFrequency != 0);
287
288 ReferenceProcessor* rp = _heap->ref_processor();
289 // enable ("weak") refs discovery
290 rp->enable_discovery(true /*verify_no_refs*/);
291 rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
292 rp->set_active_mt_degree(_heap->workers()->active_workers());
293
294 cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
295 cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
296 cm->shared_finish_mark_from_roots(/* full_gc = */ true);
297
298 _heap->swap_mark_bitmaps();
299
300 if (UseShenandoahMatrix && PrintShenandoahMatrix) {
301 LogTarget(Info, gc) lt;
302 LogStream ls(lt);
303 _heap->connection_matrix()->print_on(&ls);
304 }
305
306 if (VerifyDuringGC) {
307 HandleMark hm; // handle scope
308 _heap->prepare_for_verify();
309 // Note: we can verify only the heap here. When an object is
310 // marked, the previous value of the mark word (including
311 // identity hash values, ages, etc) is preserved, and the mark
312 // word is set to markOop::marked_value - effectively removing
313 // any hash values from the mark word. These hash values are
314 // used when verifying the dictionaries and so removing them
315 // from the mark word can make verification of the dictionaries
316 // fail. At the end of the GC, the original mark word values
317 // (including hash values) are restored to the appropriate
318 // objects.
319 _heap->verify(VerifyOption_G1UseMarkWord);
320 }
321 }
322
323 class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
324 private:
325 ShenandoahHeap* _heap;
326 public:
327 ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {
328 }
329
330 bool heap_region_do(ShenandoahHeapRegion* r) {
331 if (r->is_humongous_start()) {
332 oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
333 if (! _heap->is_marked_complete(humongous_obj)) {
334 _heap->trash_humongous_region_at(r);
335 }
336 }
337 return false;
338 }
339 };
340
341
342 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
343
344 private:
345
346 ShenandoahHeap* _heap;
347 ShenandoahHeapRegionSet* _to_regions;
348 ShenandoahHeapRegion* _to_region;
349 ShenandoahHeapRegion* _from_region;
350 HeapWord* _compact_point;
351
352 public:
353
  // 'to_regions' supplies further target regions once the current one fills
  // up (see do_object()); compaction planning starts at the bottom of
  // 'to_region'.
  ShenandoahPrepareForCompactionObjectClosure(ShenandoahHeapRegionSet* to_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _to_regions(to_regions),
    _to_region(to_region),
    _from_region(NULL),  // must be set via set_from_region() before do_object()
    _compact_point(to_region->bottom()) {
  }
361
  // Designates the region whose objects are being planned; must be called
  // before do_object() (asserted there).
  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  // Current compaction target region.
  ShenandoahHeapRegion* to_region() const {
    return _to_region;
  }
  // Next free address within the current target region.
  HeapWord* compact_point() const {
    return _compact_point;
  }
  // Plans the post-compaction address for one live object: records the new
  // location in the object's Brooks forwarding pointer. No data is moved yet;
  // the actual copy happens in the compaction phase.
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->is_marked_complete(p), "must be marked");
    assert(! _heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
    // Account for the Brooks pointer word that precedes every object.
    size_t obj_size = p->size() + BrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      // Object doesn't fit. Pick next to-region and start compacting there.
      _to_region->set_new_top(_compact_point);
      ShenandoahHeapRegion* new_to_region = _to_regions->current();
      _to_regions->next();
      if (new_to_region == NULL) {
        // No prepared to-region left: fall back to compacting within the
        // from-region itself.
        new_to_region = _from_region;
      }
      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    // Target address is past the Brooks pointer slot at the new location.
    BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size());
    _compact_point += obj_size;
  }
480 heap->workers()->run_task(&prepare_task);
481 }
482
// Oop closure that rewrites each reference field to the referent's
// post-compaction address, as read from the referent's Brooks forwarding
// pointer. Optionally maintains the connection matrix for the moved holder.
class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;
  size_t _new_obj_offset;  // displacement (HeapWords) of the holder object being visited
public:

  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _new_obj_offset(SIZE_MAX) {  // sentinel: must be set before matrix updates (asserted below)
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_heap->is_marked_complete(obj), "must be marked");
      // The forwarding pointer already holds the referent's new location.
      oop forw = oop(BrooksPointer::get_raw(obj));
      oopDesc::encode_store_heap_oop(p, forw);
      if (UseShenandoahMatrix) {
        // Only heap-resident slots participate in the matrix (roots do not).
        if (_heap->is_in_reserved(p)) {
          assert(_heap->is_in_reserved(forw), "must be in heap");
          assert (_new_obj_offset != SIZE_MAX, "should be set");
          // We're moving a to a', which points to b, about to be moved to b'.
          // We already know b' from the fwd pointer of b.
          // In the object closure, we see a, and we know a' (by looking at its
          // fwd ptr). We store the offset in the OopClosure, which is going
          // to visit all of a's fields, and then, when we see each field, we
          // subtract the offset from each field address to get the final ptr.
          _heap->connection_matrix()->set_connected(((HeapWord*) p) - _new_obj_offset, forw);
        }
      }
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
  // Old-to-new displacement of the object currently being visited; set by
  // ShenandoahAdjustPointersObjectClosure before iterating its fields.
  void set_new_obj_offset(size_t new_obj_offset) {
    _new_obj_offset = new_obj_offset;
  }
};
529
530 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
531 private:
532 ShenandoahAdjustPointersClosure _cl;
533 ShenandoahHeap* _heap;
534 public:
535 ShenandoahAdjustPointersObjectClosure() :
536 _heap(ShenandoahHeap::heap()) {
537 }
538 void do_object(oop p) {
539 assert(_heap->is_marked_complete(p), "must be marked");
540 HeapWord* forw = BrooksPointer::get_raw(p);
541 _cl.set_new_obj_offset(pointer_delta((HeapWord*) p, forw));
542 p->oop_iterate(&_cl);
543 }
544 };
545
546 class ShenandoahAdjustPointersTask : public AbstractGangTask {
547 private:
548 ShenandoahHeapRegionSet* _regions;
549 public:
550
  // Workers claim regions from 'regions' one at a time (see work()).
  ShenandoahAdjustPointersTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _regions(regions) {
  }
555
556 void work(uint worker_id) {
557 ShenandoahHeap* heap = ShenandoahHeap::heap();
558 ShenandoahHeapRegion* r = _regions->claim_next();
559 ShenandoahAdjustPointersObjectClosure obj_cl;
610 DerivedPointerTable::update_pointers();
611 #endif
612 }
613
614 ShenandoahHeapRegionSet* regions = heap->regions();
615 regions->clear_current_index();
616 ShenandoahAdjustPointersTask adjust_pointers_task(regions);
617 workers->run_task(&adjust_pointers_task);
618 }
619
// Physically moves each live object to the address recorded in its Brooks
// forwarding pointer during the planning phase, then re-initializes the fwd
// pointer at the new location.
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  bool _str_dedup;   // cached ShenandoahStringDedup::is_enabled()
  uint _worker_id;   // tags string-dedup enqueue requests
public:
  ShenandoahCompactObjectsClosure(uint worker_id) : _heap(ShenandoahHeap::heap()),
    _str_dedup(ShenandoahStringDedup::is_enabled()), _worker_id(worker_id) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked_complete(p), "must be marked");
    size_t size = (size_t)p->size();
    HeapWord* compact_to = BrooksPointer::get_raw(p);
    HeapWord* compact_from = (HeapWord*) p;
    if (compact_from != compact_to) {
      // Source and destination may overlap; the conjoint copy handles
      // overlapping ranges correctly.
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
    }
    oop new_obj = oop(compact_to);
    // new_obj->init_mark();
    // Re-arm the Brooks pointer so the moved object forwards to itself.
    BrooksPointer::initialize(new_obj);

    // String Dedup support
    if(_str_dedup && java_lang_String::is_instance_inlined(new_obj)) {
      new_obj->incr_age();
      if (ShenandoahStringDedup::is_candidate(new_obj)) {
        ShenandoahStringDedup::enqueue_from_safepoint(new_obj, _worker_id);
      }
    }
  }
};
650
665 while (r != NULL) {
666 assert(! r->is_humongous(), "must not get humongous regions here");
667 heap->marked_object_iterate(r, &cl);
668 r->set_top(r->new_top());
669 r = copy_queue->current();
670 copy_queue->next();
671 }
672 }
673 };
674
675 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
676 size_t _live;
677 ShenandoahHeap* _heap;
678 public:
679
680 ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) {
681 _heap->clear_free_regions();
682 }
683
684 bool heap_region_do(ShenandoahHeapRegion* r) {
685 // Need to reset the complete-top-at-mark-start pointer here because
686 // the complete marking bitmap is no longer valid. This ensures
687 // size-based iteration in marked_object_iterate().
688 _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom());
689
690 size_t live = r->used();
691
692 // Turn any lingering non-empty cset regions into regular regions.
693 // This must be the leftover from the cancelled concurrent GC.
694 if (r->is_cset() && live != 0) {
695 r->make_regular_bypass();
696 }
697
698 // Reclaim regular/cset regions that became empty
699 if ((r->is_regular() || r->is_cset()) && live == 0) {
700 r->make_trash();
701 }
702
703 // Recycle all trash regions
704 if (r->is_trash()) {
705 live = 0;
706 r->recycle();
707 }
708
713 }
714 _heap->add_free_region(r);
715 }
716
717 r->set_live_data(live);
718 r->reset_alloc_stats_to_shared();
719 _live += live;
720 return false;
721 }
722
  // Sum of per-region live data accumulated by heap_region_do().
  size_t get_live() { return _live; }
724
725 };
726
// Phase 4: physically move objects to their planned locations and rebuild
// per-region state afterwards.
void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCompactObjectsTask compact_task(copy_queues);
  heap->workers()->run_task(&compact_task);

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  heap->reset_complete_mark_bitmap(heap->workers());

  {
    // Region state transitions and free-list rebuilding require the heap lock.
    ShenandoahHeapLocker lock(heap->lock());
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);

    // Used space is now exactly the live data that survived compaction.
    heap->set_used(post_compact.get_live());
  }

  heap->collection_set()->clear();
  heap->clear_cancelled_concgc();

  // Also clear the next bitmap in preparation for next marking.
  heap->reset_next_mark_bitmap(heap->workers());

  // Release per-worker copy queues now that compaction is complete.
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete copy_queues[i];
  }

}
|
61 if (! oopDesc::unsafe_equals(o, read_barrier(o))) {
62 return false;
63 }
64 return true;
65 }
66 bool is_safe(narrowOop o) {
67 oop obj = oopDesc::decode_heap_oop(o);
68 return is_safe(obj);
69 }
70 #endif
71 };
72
73 class ShenandoahClearRegionStatusClosure: public ShenandoahHeapRegionClosure {
74 private:
75 ShenandoahHeap* _heap;
76
77 public:
78 ShenandoahClearRegionStatusClosure() : _heap(ShenandoahHeap::heap()) {}
79
80 bool heap_region_do(ShenandoahHeapRegion *r) {
81 _heap->set_top_at_mark_start(r->bottom(), r->top());
82 r->clear_live_data();
83 r->set_concurrent_iteration_safe_limit(r->top());
84 return false;
85 }
86 };
87
88 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
89 private:
90 ShenandoahHeap* _heap;
91
92 public:
93 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
94 bool heap_region_do(ShenandoahHeapRegion* r) {
95 if (r->is_trash()) {
96 r->recycle();
97 }
98 if (r->is_empty()) {
99 r->make_regular_bypass();
100 }
101 assert (r->is_active(), "only active regions in heap now");
130 }
131
132 {
133 ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
134 // Full GC is supposed to recover from any GC state:
135
136 // a. Cancel concurrent mark, if in progress
137 if (heap->concurrent_mark_in_progress()) {
138 heap->concurrentMark()->cancel();
139 heap->stop_concurrent_marking();
140 }
141 assert(!heap->concurrent_mark_in_progress(), "sanity");
142
143 // b. Cancel evacuation, if in progress
144 if (heap->is_evacuation_in_progress()) {
145 heap->set_evacuation_in_progress_at_safepoint(false);
146 }
147 assert(!heap->is_evacuation_in_progress(), "sanity");
148
149 // c. Reset the bitmaps for new marking
150 heap->reset_mark_bitmap(heap->workers());
151 assert(heap->is_bitmap_clear(), "sanity");
152
153 // d. Abandon reference discovery and clear all discovered references.
154 ReferenceProcessor* rp = heap->ref_processor();
155 rp->disable_discovery();
156 rp->abandon_partial_discovery();
157 rp->verify_no_references_recorded();
158
159 // e. Verify heap before changing the regions
160 if (ShenandoahVerify) {
161 // Full GC should only be called between regular concurrent cycles, therefore
162 // those verifications should be valid.
163 heap->verifier()->verify_before_fullgc();
164 }
165
166 {
167 ShenandoahHeapLocker lock(heap->lock());
168
169 // f. Make sure all regions are active. This is needed because we are potentially
170 // sliding the data through them
171 ShenandoahEnsureHeapActiveClosure ecl;
278 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
279 ShenandoahHeap* _heap = ShenandoahHeap::heap();
280
281 ShenandoahConcurrentMark* cm = _heap->concurrentMark();
282
283 // Do not trust heuristics, because this can be our last resort collection.
284 // Only ignore processing references and class unloading if explicitly disabled.
285 cm->set_process_references(ShenandoahRefProcFrequency != 0);
286 cm->set_unload_classes(ShenandoahUnloadClassesFrequency != 0);
287
288 ReferenceProcessor* rp = _heap->ref_processor();
289 // enable ("weak") refs discovery
290 rp->enable_discovery(true /*verify_no_refs*/);
291 rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
292 rp->set_active_mt_degree(_heap->workers()->active_workers());
293
294 cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
295 cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
296 cm->shared_finish_mark_from_roots(/* full_gc = */ true);
297
298 if (UseShenandoahMatrix && PrintShenandoahMatrix) {
299 LogTarget(Info, gc) lt;
300 LogStream ls(lt);
301 _heap->connection_matrix()->print_on(&ls);
302 }
303
304 if (VerifyDuringGC) {
305 HandleMark hm; // handle scope
306 _heap->prepare_for_verify();
307 // Note: we can verify only the heap here. When an object is
308 // marked, the previous value of the mark word (including
309 // identity hash values, ages, etc) is preserved, and the mark
310 // word is set to markOop::marked_value - effectively removing
311 // any hash values from the mark word. These hash values are
312 // used when verifying the dictionaries and so removing them
313 // from the mark word can make verification of the dictionaries
314 // fail. At the end of the GC, the original mark word values
315 // (including hash values) are restored to the appropriate
316 // objects.
317 _heap->verify(VerifyOption_G1UseMarkWord);
318 }
319 _heap->set_bitmap_valid(true);
320 }
321
322 class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
323 private:
324 ShenandoahHeap* _heap;
325 public:
326 ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {
327 }
328
329 bool heap_region_do(ShenandoahHeapRegion* r) {
330 if (r->is_humongous_start()) {
331 oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
332 if (! _heap->is_marked(humongous_obj)) {
333 _heap->trash_humongous_region_at(r);
334 }
335 }
336 return false;
337 }
338 };
339
340
341 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
342
343 private:
344
345 ShenandoahHeap* _heap;
346 ShenandoahHeapRegionSet* _to_regions;
347 ShenandoahHeapRegion* _to_region;
348 ShenandoahHeapRegion* _from_region;
349 HeapWord* _compact_point;
350
351 public:
352
  // 'to_regions' supplies further target regions once the current one fills
  // up (see do_object()); compaction planning starts at the bottom of
  // 'to_region'.
  ShenandoahPrepareForCompactionObjectClosure(ShenandoahHeapRegionSet* to_regions, ShenandoahHeapRegion* to_region) :
    _heap(ShenandoahHeap::heap()),
    _to_regions(to_regions),
    _to_region(to_region),
    _from_region(NULL),  // must be set via set_from_region() before do_object()
    _compact_point(to_region->bottom()) {
  }
360
  // Designates the region whose objects are being planned; must be called
  // before do_object() (asserted there).
  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }

  // Current compaction target region.
  ShenandoahHeapRegion* to_region() const {
    return _to_region;
  }
  // Next free address within the current target region.
  HeapWord* compact_point() const {
    return _compact_point;
  }
  // Plans the post-compaction address for one live object: records the new
  // location in the object's Brooks forwarding pointer. No data is moved yet;
  // the actual copy happens in the compaction phase.
  void do_object(oop p) {
    assert(_from_region != NULL, "must set before work");
    assert(_heap->is_marked(p), "must be marked");
    assert(! _heap->allocated_after_mark_start((HeapWord*) p), "must be truly marked");
    // Account for the Brooks pointer word that precedes every object.
    size_t obj_size = p->size() + BrooksPointer::word_size();
    if (_compact_point + obj_size > _to_region->end()) {
      // Object doesn't fit. Pick next to-region and start compacting there.
      _to_region->set_new_top(_compact_point);
      ShenandoahHeapRegion* new_to_region = _to_regions->current();
      _to_regions->next();
      if (new_to_region == NULL) {
        // No prepared to-region left: fall back to compacting within the
        // from-region itself.
        new_to_region = _from_region;
      }
      assert(new_to_region != _to_region, "must not reuse same to-region");
      assert(new_to_region != NULL, "must not be NULL");
      _to_region = new_to_region;
      _compact_point = _to_region->bottom();
    }
    assert(_compact_point + obj_size <= _to_region->end(), "must fit");
    assert(oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_oop_static_not_null(p)),
           "expect forwarded oop");
    // Target address is past the Brooks pointer slot at the new location.
    BrooksPointer::set_raw(p, _compact_point + BrooksPointer::word_size());
    _compact_point += obj_size;
  }
479 heap->workers()->run_task(&prepare_task);
480 }
481
// Oop closure that rewrites each reference field to the referent's
// post-compaction address, as read from the referent's Brooks forwarding
// pointer. Optionally maintains the connection matrix for the moved holder.
class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
private:
  ShenandoahHeap* _heap;
  size_t _new_obj_offset;  // displacement (HeapWords) of the holder object being visited
public:

  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _new_obj_offset(SIZE_MAX) {  // sentinel: must be set before matrix updates (asserted below)
  }

private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      assert(_heap->is_marked(obj), "must be marked");
      // The forwarding pointer already holds the referent's new location.
      oop forw = oop(BrooksPointer::get_raw(obj));
      oopDesc::encode_store_heap_oop(p, forw);
      if (UseShenandoahMatrix) {
        // Only heap-resident slots participate in the matrix (roots do not).
        if (_heap->is_in_reserved(p)) {
          assert(_heap->is_in_reserved(forw), "must be in heap");
          assert (_new_obj_offset != SIZE_MAX, "should be set");
          // We're moving a to a', which points to b, about to be moved to b'.
          // We already know b' from the fwd pointer of b.
          // In the object closure, we see a, and we know a' (by looking at its
          // fwd ptr). We store the offset in the OopClosure, which is going
          // to visit all of a's fields, and then, when we see each field, we
          // subtract the offset from each field address to get the final ptr.
          _heap->connection_matrix()->set_connected(((HeapWord*) p) - _new_obj_offset, forw);
        }
      }
    }
  }
public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
  // Old-to-new displacement of the object currently being visited; set by
  // ShenandoahAdjustPointersObjectClosure before iterating its fields.
  void set_new_obj_offset(size_t new_obj_offset) {
    _new_obj_offset = new_obj_offset;
  }
};
528
529 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
530 private:
531 ShenandoahAdjustPointersClosure _cl;
532 ShenandoahHeap* _heap;
533 public:
534 ShenandoahAdjustPointersObjectClosure() :
535 _heap(ShenandoahHeap::heap()) {
536 }
537 void do_object(oop p) {
538 assert(_heap->is_marked(p), "must be marked");
539 HeapWord* forw = BrooksPointer::get_raw(p);
540 _cl.set_new_obj_offset(pointer_delta((HeapWord*) p, forw));
541 p->oop_iterate(&_cl);
542 }
543 };
544
545 class ShenandoahAdjustPointersTask : public AbstractGangTask {
546 private:
547 ShenandoahHeapRegionSet* _regions;
548 public:
549
  // Workers claim regions from 'regions' one at a time (see work()).
  ShenandoahAdjustPointersTask(ShenandoahHeapRegionSet* regions) :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _regions(regions) {
  }
554
555 void work(uint worker_id) {
556 ShenandoahHeap* heap = ShenandoahHeap::heap();
557 ShenandoahHeapRegion* r = _regions->claim_next();
558 ShenandoahAdjustPointersObjectClosure obj_cl;
609 DerivedPointerTable::update_pointers();
610 #endif
611 }
612
613 ShenandoahHeapRegionSet* regions = heap->regions();
614 regions->clear_current_index();
615 ShenandoahAdjustPointersTask adjust_pointers_task(regions);
616 workers->run_task(&adjust_pointers_task);
617 }
618
// Physically moves each live object to the address recorded in its Brooks
// forwarding pointer during the planning phase, then re-initializes the fwd
// pointer at the new location.
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* _heap;
  bool _str_dedup;   // cached ShenandoahStringDedup::is_enabled()
  uint _worker_id;   // tags string-dedup enqueue requests
public:
  ShenandoahCompactObjectsClosure(uint worker_id) : _heap(ShenandoahHeap::heap()),
    _str_dedup(ShenandoahStringDedup::is_enabled()), _worker_id(worker_id) {
  }
  void do_object(oop p) {
    assert(_heap->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    HeapWord* compact_to = BrooksPointer::get_raw(p);
    HeapWord* compact_from = (HeapWord*) p;
    if (compact_from != compact_to) {
      // Source and destination may overlap; the conjoint copy handles
      // overlapping ranges correctly.
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
    }
    oop new_obj = oop(compact_to);
    // new_obj->init_mark();
    // Re-arm the Brooks pointer so the moved object forwards to itself.
    BrooksPointer::initialize(new_obj);

    // String Dedup support
    if(_str_dedup && java_lang_String::is_instance_inlined(new_obj)) {
      new_obj->incr_age();
      if (ShenandoahStringDedup::is_candidate(new_obj)) {
        ShenandoahStringDedup::enqueue_from_safepoint(new_obj, _worker_id);
      }
    }
  }
};
649
664 while (r != NULL) {
665 assert(! r->is_humongous(), "must not get humongous regions here");
666 heap->marked_object_iterate(r, &cl);
667 r->set_top(r->new_top());
668 r = copy_queue->current();
669 copy_queue->next();
670 }
671 }
672 };
673
674 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
675 size_t _live;
676 ShenandoahHeap* _heap;
677 public:
678
  // Starts with an empty free list; heap_region_do() re-populates it via
  // add_free_region() as regions are processed.
  ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) {
    _heap->clear_free_regions();
  }
682
683 bool heap_region_do(ShenandoahHeapRegion* r) {
684 // Need to reset the top-at-mark-start pointer here because
685 // the marking bitmap is no longer valid. This ensures
686 // size-based iteration in marked_object_iterate().
687 _heap->set_top_at_mark_start(r->bottom(), r->bottom());
688
689 size_t live = r->used();
690
691 // Turn any lingering non-empty cset regions into regular regions.
692 // This must be the leftover from the cancelled concurrent GC.
693 if (r->is_cset() && live != 0) {
694 r->make_regular_bypass();
695 }
696
697 // Reclaim regular/cset regions that became empty
698 if ((r->is_regular() || r->is_cset()) && live == 0) {
699 r->make_trash();
700 }
701
702 // Recycle all trash regions
703 if (r->is_trash()) {
704 live = 0;
705 r->recycle();
706 }
707
712 }
713 _heap->add_free_region(r);
714 }
715
716 r->set_live_data(live);
717 r->reset_alloc_stats_to_shared();
718 _live += live;
719 return false;
720 }
721
  // Sum of per-region live data accumulated by heap_region_do().
  size_t get_live() { return _live; }
723
724 };
725
// Phase 4: physically move objects to their planned locations and rebuild
// per-region state afterwards.
void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** copy_queues) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahCompactObjectsTask compact_task(copy_queues);
  heap->workers()->run_task(&compact_task);

  // Reset marking bitmap. We're about to reset the top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  heap->reset_mark_bitmap(heap->workers());

  {
    // Region state transitions and free-list rebuilding require the heap lock.
    ShenandoahHeapLocker lock(heap->lock());
    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);

    // Used space is now exactly the live data that survived compaction.
    heap->set_used(post_compact.get_live());
  }

  heap->collection_set()->clear();
  heap->clear_cancelled_concgc();

  // Release per-worker copy queues now that compaction is complete.
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete copy_queues[i];
  }

}
|