19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "code/codeCache.hpp"
26 #include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
27 #include "gc_implementation/shared/gcTimer.hpp"
28 #include "gc_implementation/shenandoah/brooksPointer.hpp"
29 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
30 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
31 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
32 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
33 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
34 #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
35 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
36 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
37 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
38 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
39 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
40 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
41 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
42 #include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
43 #include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
44 #include "oops/oop.inline.hpp"
45 #include "runtime/biasedLocking.hpp"
46 #include "runtime/thread.hpp"
47 #include "utilities/copy.hpp"
48 #include "utilities/growableArray.hpp"
49 #include "utilities/taskqueue.hpp"
50 #include "utilities/workgroup.hpp"
51
52 class ShenandoahClearRegionStatusClosure: public ShenandoahHeapRegionClosure {
53 private:
54 ShenandoahHeap* const _heap;
55
56 public:
57 ShenandoahClearRegionStatusClosure() : _heap(ShenandoahHeap::heap()) {}
58
59 bool heap_region_do(ShenandoahHeapRegion *r) {
60 _heap->set_next_top_at_mark_start(r->bottom(), r->top());
61 r->clear_live_data();
62 r->set_concurrent_iteration_safe_limit(r->top());
63 return false;
64 }
65 };
66
67 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
68 private:
69 ShenandoahHeap* const _heap;
70
71 public:
72 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
73 bool heap_region_do(ShenandoahHeapRegion* r) {
74 if (r->is_trash()) {
75 r->recycle();
76 }
77 if (r->is_cset()) {
78 r->make_regular_bypass();
79 }
80 if (r->is_empty_uncommitted()) {
81 r->make_committed_bypass();
82 }
83 assert (r->is_committed(), err_msg("only committed regions in heap now, see region " SIZE_FORMAT, r->region_number()));
84
85 // Record current region occupancy: this communicates empty regions are free
86 // to the rest of Full GC code.
87 r->set_new_top(r->top());
88 return false;
89 }
90 };
91
92 void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
93 _gc_timer = gc_timer;
94 }
95
96 void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
97 ShenandoahHeap* heap = ShenandoahHeap::heap();
98
99 {
100 if (ShenandoahVerify) {
101 heap->verifier()->verify_before_fullgc();
102 }
103
104 heap->set_full_gc_in_progress(true);
105
106 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
107 assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
108
109 {
110 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
111 heap->pre_full_gc_dump(_gc_timer);
112 }
113
114 {
115 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_prepare);
116 // Full GC is supposed to recover from any GC state:
117
118 // a. Cancel concurrent mark, if in progress
119 if (heap->is_concurrent_mark_in_progress()) {
120 heap->concurrentMark()->cancel();
121 heap->stop_concurrent_marking();
122 }
123 assert(!heap->is_concurrent_mark_in_progress(), "sanity");
124
125 // b1. Cancel evacuation, if in progress
126 if (heap->is_evacuation_in_progress()) {
127 heap->set_evacuation_in_progress(false);
128 }
129 assert(!heap->is_evacuation_in_progress(), "sanity");
130
131 // b2. Cancel update-refs, if in progress
132 if (heap->is_update_refs_in_progress()) {
133 heap->set_update_refs_in_progress(false);
134 }
135 assert(!heap->is_update_refs_in_progress(), "sanity");
136
137 // c. Reset the bitmaps for new marking
138 heap->reset_next_mark_bitmap();
139 assert(heap->is_next_bitmap_clear(), "sanity");
140
141 // d. Abandon reference discovery and clear all discovered references.
142 ReferenceProcessor *rp = heap->ref_processor();
143 rp->disable_discovery();
144 rp->abandon_partial_discovery();
145 rp->verify_no_references_recorded();
146
147 {
148 ShenandoahHeapLocker lock(heap->lock());
149
150 // f. Make sure all regions are active. This is needed because we are potentially
151 // sliding the data through them
152 ShenandoahEnsureHeapActiveClosure ecl;
153 heap->heap_region_iterate(&ecl, false, false);
154
155 // g. Clear region statuses, including collection set status
156 ShenandoahClearRegionStatusClosure cl;
157 heap->heap_region_iterate(&cl, false, false);
158 }
159 }
160
161 {
162 if (UseTLAB) {
163 heap->make_tlabs_parsable(true);
164 }
165
166 CodeCache::gc_prologue();
167
168 // TODO: We don't necessarily need to update refs. We might want to clean
169 // up managing has_forwarded_objects when diving into degen/full-gc.
170 heap->set_has_forwarded_objects(true);
171
172 OrderAccess::fence();
173
174 phase1_mark_heap();
175
176 // Prevent read-barrier from kicking in while adjusting pointers in phase3.
177 heap->set_has_forwarded_objects(false);
178
179 heap->set_full_gc_move_in_progress(true);
180
181 // Setup workers for the rest
182 {
183 OrderAccess::fence();
184
185 // Initialize worker slices
186 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
187 for (uint i = 0; i < heap->max_workers(); i++) {
188 worker_slices[i] = new ShenandoahHeapRegionSet();
189 }
190
191 phase2_calculate_target_addresses(worker_slices);
192
193 OrderAccess::fence();
194
195 phase3_update_references();
196
209 heap->set_full_gc_move_in_progress(false);
210 heap->set_full_gc_in_progress(false);
211
212 if (ShenandoahVerify) {
213 heap->verifier()->verify_after_fullgc();
214 }
215 }
216
217 {
218 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
219 heap->post_full_gc_dump(_gc_timer);
220 }
221
222 if (UseTLAB) {
223 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
224 heap->resize_all_tlabs();
225 }
226 }
227 }
228
229 void ShenandoahMarkCompact::phase1_mark_heap() {
230 ShenandoahHeap* heap = ShenandoahHeap::heap();
231 GCTraceTime time("Phase 1: Mark live objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
232 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
233
234 ShenandoahConcurrentMark* cm = heap->concurrentMark();
235
236 // Do not trust heuristics, because this can be our last resort collection.
237 // Only ignore processing references and class unloading if explicitly disabled.
238 heap->set_process_references(ShenandoahRefProcFrequency != 0);
239 heap->set_unload_classes(ShenandoahUnloadClassesFrequency != 0);
240
241 ReferenceProcessor* rp = heap->ref_processor();
242 // enable ("weak") refs discovery
243 rp->enable_discovery(true /*verify_no_refs*/, true);
244 rp->setup_policy(true); // snapshot the soft ref policy to be used in this cycle
245 rp->set_active_mt_degree(heap->workers()->active_workers());
246
247 cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
248 cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
249 cm->shared_finish_mark_from_roots(/* full_gc = */ true);
250
251 heap->swap_mark_bitmaps();
252 }
253
254 class ShenandoahMCReclaimHumongousRegionClosure : public ShenandoahHeapRegionClosure {
255 private:
256 ShenandoahHeap* const _heap;
257 public:
258 ShenandoahMCReclaimHumongousRegionClosure() : _heap(ShenandoahHeap::heap()) {}
259
260 bool heap_region_do(ShenandoahHeapRegion* r) {
261 if (r->is_humongous_start()) {
262 oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
263 if (!_heap->is_marked_complete(humongous_obj)) {
264 _heap->trash_humongous_region_at(r);
265 }
266 }
267 return false;
268 }
269 };
270
271 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
272 private:
273 ShenandoahHeap* const _heap;
274 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
275 int _empty_regions_pos;
276 ShenandoahHeapRegion* _to_region;
277 ShenandoahHeapRegion* _from_region;
278 HeapWord* _compact_point;
279
280 public:
281 ShenandoahPrepareForCompactionObjectClosure(GrowableArray<ShenandoahHeapRegion*>& empty_regions, ShenandoahHeapRegion* to_region) :
282 _heap(ShenandoahHeap::heap()),
283 _empty_regions(empty_regions),
284 _empty_regions_pos(0),
285 _to_region(to_region),
286 _from_region(NULL),
287 _compact_point(to_region->bottom()) {}
288
289 void set_from_region(ShenandoahHeapRegion* from_region) {
290 _from_region = from_region;
291 }
292
293 void finish_region() {
294 assert(_to_region != NULL, "should not happen");
295 _to_region->set_new_top(_compact_point);
296 }
297
298 bool is_compact_same_region() {
299 return _from_region == _to_region;
300 }
301
302 int empty_regions_pos() {
303 return _empty_regions_pos;
304 }
305
306 void do_object(oop p) {
307 assert(_from_region != NULL, "must set before work");
308 assert(_heap->is_marked_complete(p), "must be marked");
309 assert(!_heap->allocated_after_complete_mark_start((HeapWord*) p), "must be truly marked");
310
311 size_t obj_size = p->size() + BrooksPointer::word_size();
312 if (_compact_point + obj_size > _to_region->end()) {
313 finish_region();
314
315 // Object doesn't fit. Pick next empty region and start compacting there.
316 ShenandoahHeapRegion* new_to_region;
317 if (_empty_regions_pos < _empty_regions.length()) {
318 new_to_region = _empty_regions.at(_empty_regions_pos);
319 _empty_regions_pos++;
320 } else {
321 // Out of empty region? Compact within the same region.
322 new_to_region = _from_region;
323 }
324
325 assert(new_to_region != _to_region, "must not reuse same to-region");
326 assert(new_to_region != NULL, "must not be NULL");
327 _to_region = new_to_region;
328 _compact_point = _to_region->bottom();
329 }
364 AbstractGangTask("Shenandoah Prepare For Compaction Task"),
365 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
366 }
367
368 void work(uint worker_id) {
369 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
370 ShenandoahHeapRegion* from_region = next_from_region(slice);
371
372 // No work?
373 if (from_region == NULL) {
374 return;
375 }
376
377 // Sliding compaction. Walk all regions in the slice, and compact them.
378 // Remember empty regions and reuse them as needed.
379 ResourceMark rm;
380 GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
381 ShenandoahPrepareForCompactionObjectClosure cl(empty_regions, from_region);
382 while (from_region != NULL) {
383 cl.set_from_region(from_region);
384 _heap->marked_object_iterate(from_region, &cl);
385
386 // Compacted the region to somewhere else? From-region is empty then.
387 if (!cl.is_compact_same_region()) {
388 empty_regions.append(from_region);
389 }
390 from_region = next_from_region(slice);
391 }
392 cl.finish_region();
393
394 // Mark all remaining regions as empty
395 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
396 ShenandoahHeapRegion* r = empty_regions.at(pos);
397 r->set_new_top(r->bottom());
398 }
399 }
400 };
401
402 void ShenandoahMarkCompact::calculate_target_humongous_objects() {
403 ShenandoahHeap* heap = ShenandoahHeap::heap();
404
429 oop old_obj = oop(r->bottom() + BrooksPointer::word_size());
430 size_t words_size = old_obj->size() + BrooksPointer::word_size();
431 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
432
433 size_t start = to_end - num_regions;
434
435 if (start >= to_begin && start != r->region_number()) {
436 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
437 BrooksPointer::set_raw(old_obj, heap->get_region(start)->bottom() + BrooksPointer::word_size());
438 to_end = start;
439 continue;
440 }
441 }
442
443 // Failed to fit. Scan starting from current region.
444 to_begin = r->region_number();
445 to_end = r->region_number();
446 }
447 }
448
449 void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
450 ShenandoahHeap* heap = ShenandoahHeap::heap();
451 GCTraceTime time("Phase 2: Compute new object addresses", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
452 ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
453
454 {
455 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
456
457 {
458 ShenandoahHeapLocker lock(heap->lock());
459
460 ShenandoahMCReclaimHumongousRegionClosure cl;
461 heap->heap_region_iterate(&cl);
462
463 // After some humongous regions were reclaimed, we need to ensure their
464 // backing storage is active. This is needed because we are potentially
465 // sliding the data through them.
466 ShenandoahEnsureHeapActiveClosure ecl;
467 heap->heap_region_iterate(&ecl, false, false);
468 }
469
470 // Compute the new addresses for regular objects
471 ShenandoahPrepareForCompactionTask prepare_task(worker_slices);
472 heap->workers()->run_task(&prepare_task);
473 }
474
475 // Compute the new addresses for humongous objects
476 {
477 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
478 calculate_target_humongous_objects();
479 }
480 }
481
482 class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
483 private:
484 ShenandoahHeap* const _heap;
485
486 template <class T>
487 inline void do_oop_work(T* p) {
488 T o = oopDesc::load_heap_oop(p);
489 if (! oopDesc::is_null(o)) {
490 oop obj = oopDesc::decode_heap_oop_not_null(o);
491 assert(_heap->is_marked_complete(obj), "must be marked");
492 oop forw = oop(BrooksPointer::get_raw(obj));
493 oopDesc::encode_store_heap_oop(p, forw);
494 }
495 }
496
497 public:
498 ShenandoahAdjustPointersClosure() : _heap(ShenandoahHeap::heap()) {}
499
500 void do_oop(oop* p) { do_oop_work(p); }
501 void do_oop(narrowOop* p) { do_oop_work(p); }
502 };
503
504 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
505 private:
506 ShenandoahHeap* const _heap;
507 ShenandoahAdjustPointersClosure _cl;
508
509 public:
510 ShenandoahAdjustPointersObjectClosure() :
511 _heap(ShenandoahHeap::heap()) {
512 }
513 void do_object(oop p) {
514 assert(_heap->is_marked_complete(p), "must be marked");
515 p->oop_iterate(&_cl);
516 }
517 };
518
519 class ShenandoahAdjustPointersTask : public AbstractGangTask {
520 private:
521 ShenandoahHeap* const _heap;
522 ShenandoahRegionIterator _regions;
523
524 public:
525 ShenandoahAdjustPointersTask() :
526 AbstractGangTask("Shenandoah Adjust Pointers Task"),
527 _heap(ShenandoahHeap::heap()) {
528 }
529
530 void work(uint worker_id) {
531 ShenandoahAdjustPointersObjectClosure obj_cl;
532 ShenandoahHeapRegion* r = _regions.next();
533 while (r != NULL) {
534 if (!r->is_humongous_continuation()) {
535 _heap->marked_object_iterate(r, &obj_cl);
536 }
537 r = _regions.next();
538 }
539 }
540 };
541
542 class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
543 private:
544 ShenandoahRootProcessor* _rp;
545
546 public:
547 ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
548 AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
549 _rp(rp) {}
550
551 void work(uint worker_id) {
552 ShenandoahAdjustPointersClosure cl;
553 CLDToOopClosure adjust_cld_closure(&cl, true);
554 MarkingCodeBlobClosure adjust_code_closure(&cl,
570 {
571 COMPILER2_PRESENT(DerivedPointerTable::clear());
572 ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
573 ShenandoahAdjustRootPointersTask task(&rp);
574 workers->run_task(&task);
575 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
576 }
577
578 ShenandoahAdjustPointersTask adjust_pointers_task;
579 workers->run_task(&adjust_pointers_task);
580 }
581
582 class ShenandoahCompactObjectsClosure : public ObjectClosure {
583 private:
584 ShenandoahHeap* const _heap;
585
586 public:
587 ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {}
588
589 void do_object(oop p) {
590 assert(_heap->is_marked_complete(p), "must be marked");
591 size_t size = (size_t)p->size();
592 HeapWord* compact_to = BrooksPointer::get_raw(p);
593 HeapWord* compact_from = (HeapWord*) p;
594 if (compact_from != compact_to) {
595 Copy::aligned_conjoint_words(compact_from, compact_to, size);
596 }
597 oop new_obj = oop(compact_to);
598 BrooksPointer::initialize(new_obj);
599 }
600 };
601
602 class ShenandoahCompactObjectsTask : public AbstractGangTask {
603 private:
604 ShenandoahHeap* const _heap;
605 ShenandoahHeapRegionSet** const _worker_slices;
606
607 public:
608 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
609 AbstractGangTask("Shenandoah Compact Objects Task"),
610 _heap(ShenandoahHeap::heap()),
611 _worker_slices(worker_slices) {
612 }
613
614 void work(uint worker_id) {
615 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
616
617 ShenandoahCompactObjectsClosure cl;
618 ShenandoahHeapRegion* r = slice.next();
619 while (r != NULL) {
620 assert(!r->is_humongous(), "must not get humongous regions here");
621 _heap->marked_object_iterate(r, &cl);
622 r->set_top(r->new_top());
623 r = slice.next();
624 }
625 }
626 };
627
628 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
629 private:
630 ShenandoahHeap* const _heap;
631 size_t _live;
632
633 public:
634 ShenandoahPostCompactClosure() : _live(0), _heap(ShenandoahHeap::heap()) {
635 _heap->free_set()->clear();
636 }
637
638 bool heap_region_do(ShenandoahHeapRegion* r) {
639 assert (!r->is_cset(), "cset regions should have been demoted already");
640
641 // Need to reset the complete-top-at-mark-start pointer here because
642 // the complete marking bitmap is no longer valid. This ensures
643 // size-based iteration in marked_object_iterate().
644 // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
645 // pinned regions.
646 if (!r->is_pinned()) {
647 _heap->set_complete_top_at_mark_start(r->bottom(), r->bottom());
648 }
649
650 size_t live = r->used();
651
652 // Make empty regions that have been allocated into regular
653 if (r->is_empty() && live > 0) {
654 r->make_regular_bypass();
655 }
656
657 // Reclaim regular regions that became empty
658 if (r->is_regular() && live == 0) {
659 r->make_trash();
660 }
661
662 // Recycle all trash regions
663 if (r->is_trash()) {
664 live = 0;
665 r->recycle();
666 }
667
746
747 // This is slightly different to ShHeap::reset_next_mark_bitmap:
748 // we need to remain able to walk pinned regions.
749 // Since pinned region do not move and don't get compacted, we will get holes with
750 // unreachable objects in them (which may have pointers to unloaded Klasses and thus
751 // cannot be iterated over using oop->size(). The only way to safely iterate over those is using
752 // a valid marking bitmap and valid TAMS pointer. This class only resets marking
753 // bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
754 class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
755 private:
756 ShenandoahRegionIterator _regions;
757
758 public:
759 ShenandoahMCResetCompleteBitmapTask() :
760 AbstractGangTask("Parallel Reset Bitmap Task") {
761 }
762
763 void work(uint worker_id) {
764 ShenandoahHeapRegion* region = _regions.next();
765 ShenandoahHeap* heap = ShenandoahHeap::heap();
766 while (region != NULL) {
767 if (heap->is_bitmap_slice_committed(region) && !region->is_pinned()) {
768 HeapWord* bottom = region->bottom();
769 HeapWord* top = heap->complete_top_at_mark_start(region->bottom());
770 if (top > bottom) {
771 heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
772 }
773 assert(heap->is_complete_bitmap_clear_range(bottom, region->end()), "must be clear");
774 }
775 region = _regions.next();
776 }
777 }
778 };
779
780 void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
781 ShenandoahHeap* heap = ShenandoahHeap::heap();
782 GCTraceTime time("Phase 4: Move objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
783 ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
784
785 // Compact regular objects first
786 {
787 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
788 ShenandoahCompactObjectsTask compact_task(worker_slices);
789 heap->workers()->run_task(&compact_task);
790 }
791
792 // Compact humongous objects after regular object moves
793 {
794 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
795 compact_humongous_objects();
796 }
797
798 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
799 // and must ensure the bitmap is in sync.
800 ShenandoahMCResetCompleteBitmapTask task;
801 heap->workers()->run_task(&task);
802
803 // Bring regions in proper states after the collection, and set heap properties.
804 {
805 ShenandoahHeapLocker lock(heap->lock());
806 ShenandoahPostCompactClosure post_compact;
807 heap->heap_region_iterate(&post_compact);
808 heap->set_used(post_compact.get_live());
809
810 heap->collection_set()->clear();
811 heap->free_set()->rebuild();
812 }
813
814 heap->clear_cancelled_concgc();
815
816 // Also clear the next bitmap in preparation for next marking.
817 heap->reset_next_mark_bitmap();
818 }
|
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24 #include "precompiled.hpp"
25 #include "code/codeCache.hpp"
26 #include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
27 #include "gc_implementation/shared/gcTimer.hpp"
28 #include "gc_implementation/shenandoah/brooksPointer.hpp"
29 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
30 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
31 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
32 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
33 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
34 #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
35 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
36 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
37 #include "gc_implementation/shenandoah/shenandoahHeap.hpp"
38 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
39 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
40 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
41 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
42 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
43 #include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
44 #include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "runtime/biasedLocking.hpp"
47 #include "runtime/thread.hpp"
48 #include "utilities/copy.hpp"
49 #include "utilities/growableArray.hpp"
50 #include "utilities/taskqueue.hpp"
51 #include "utilities/workgroup.hpp"
52
53 void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
54 _gc_timer = gc_timer;
55 }
56
57 void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
58 ShenandoahHeap* heap = ShenandoahHeap::heap();
59
60 {
61 if (ShenandoahVerify) {
62 heap->verifier()->verify_before_fullgc();
63 }
64
65 heap->set_full_gc_in_progress(true);
66
67 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
68 assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");
69
70 {
71 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
72 heap->pre_full_gc_dump(_gc_timer);
73 }
74
75 {
76 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_prepare);
77 // Full GC is supposed to recover from any GC state:
78
79 // 0. Remember if we have forwarded objects
80 bool has_forwarded_objects = heap->has_forwarded_objects();
81
82 // a. Cancel concurrent mark, if in progress
83 if (heap->is_concurrent_mark_in_progress()) {
84 heap->concurrentMark()->cancel();
85 heap->stop_concurrent_marking();
86 }
87 assert(!heap->is_concurrent_mark_in_progress(), "sanity");
88
89 // b1. Cancel evacuation, if in progress
90 if (heap->is_evacuation_in_progress()) {
91 heap->set_evacuation_in_progress(false);
92 }
93 assert(!heap->is_evacuation_in_progress(), "sanity");
94
95 // b2. Cancel update-refs, if in progress
96 if (heap->is_update_refs_in_progress()) {
97 heap->set_update_refs_in_progress(false);
98 }
99 assert(!heap->is_update_refs_in_progress(), "sanity");
100
101 // c. Reset the bitmaps for new marking
102 heap->reset_next_mark_bitmap();
103 assert(heap->next_marking_context()->is_bitmap_clear(), "sanity");
104
105 // d. Abandon reference discovery and clear all discovered references.
106 ReferenceProcessor *rp = heap->ref_processor();
107 rp->disable_discovery();
108 rp->abandon_partial_discovery();
109 rp->verify_no_references_recorded();
110
111 // e. Set back forwarded objects bit back, in case some steps above dropped it.
112 heap->set_has_forwarded_objects(has_forwarded_objects);
113 }
114
115 {
116 heap->make_parsable(true);
117
118 CodeCache::gc_prologue();
119
120 OrderAccess::fence();
121
122 phase1_mark_heap();
123
124 // Once marking is done, which may have fixed up forwarded objects, we can drop it.
125 // Coming out of Full GC, we would not have any forwarded objects.
126 // This also prevents read barrier from kicking in while adjusting pointers in phase3.
127 heap->set_has_forwarded_objects(false);
128
129 heap->set_full_gc_move_in_progress(true);
130
131 // Setup workers for the rest
132 {
133 OrderAccess::fence();
134
135 // Initialize worker slices
136 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
137 for (uint i = 0; i < heap->max_workers(); i++) {
138 worker_slices[i] = new ShenandoahHeapRegionSet();
139 }
140
141 phase2_calculate_target_addresses(worker_slices);
142
143 OrderAccess::fence();
144
145 phase3_update_references();
146
159 heap->set_full_gc_move_in_progress(false);
160 heap->set_full_gc_in_progress(false);
161
162 if (ShenandoahVerify) {
163 heap->verifier()->verify_after_fullgc();
164 }
165 }
166
167 {
168 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
169 heap->post_full_gc_dump(_gc_timer);
170 }
171
172 if (UseTLAB) {
173 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
174 heap->resize_all_tlabs();
175 }
176 }
177 }
178
179 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
180 private:
181 ShenandoahMarkingContext* const _ctx;
182
183 public:
184 ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->next_marking_context()) {}
185
186 bool heap_region_do(ShenandoahHeapRegion *r) {
187 _ctx->set_top_at_mark_start(r->region_number(), r->top());
188 r->clear_live_data();
189 r->set_concurrent_iteration_safe_limit(r->top());
190 return false;
191 }
192 };
193
194 void ShenandoahMarkCompact::phase1_mark_heap() {
195 ShenandoahHeap* heap = ShenandoahHeap::heap();
196 GCTraceTime time("Phase 1: Mark live objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
197 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);
198
199 {
200 ShenandoahHeapLocker lock(heap->lock());
201 ShenandoahPrepareForMarkClosure cl;
202 heap->heap_region_iterate(&cl, false, false);
203 }
204
205 ShenandoahConcurrentMark* cm = heap->concurrentMark();
206
207 // Do not trust heuristics, because this can be our last resort collection.
208 // Only ignore processing references and class unloading if explicitly disabled.
209 heap->set_process_references(ShenandoahRefProcFrequency != 0);
210 heap->set_unload_classes(ClassUnloading);
211
212 ReferenceProcessor* rp = heap->ref_processor();
213 // enable ("weak") refs discovery
214 rp->enable_discovery(true /*verify_no_refs*/, true);
215 rp->setup_policy(true); // forcefully purge all soft references
216 rp->set_active_mt_degree(heap->workers()->active_workers());
217
218 cm->update_roots(ShenandoahPhaseTimings::full_gc_roots);
219 cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots);
220 cm->shared_finish_mark_from_roots(/* full_gc = */ true);
221
222 heap->swap_mark_contexts();
223 }
224
225 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
226 private:
227 ShenandoahHeap* const _heap;
228 GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
229 int _empty_regions_pos;
230 ShenandoahHeapRegion* _to_region;
231 ShenandoahHeapRegion* _from_region;
232 HeapWord* _compact_point;
233
234 public:
235 ShenandoahPrepareForCompactionObjectClosure(GrowableArray<ShenandoahHeapRegion*>& empty_regions, ShenandoahHeapRegion* to_region) :
236 _heap(ShenandoahHeap::heap()),
237 _empty_regions(empty_regions),
238 _empty_regions_pos(0),
239 _to_region(to_region),
240 _from_region(NULL),
241 _compact_point(to_region->bottom()) {}
242
243 void set_from_region(ShenandoahHeapRegion* from_region) {
244 _from_region = from_region;
245 }
246
247 void finish_region() {
248 assert(_to_region != NULL, "should not happen");
249 _to_region->set_new_top(_compact_point);
250 }
251
252 bool is_compact_same_region() {
253 return _from_region == _to_region;
254 }
255
256 int empty_regions_pos() {
257 return _empty_regions_pos;
258 }
259
260 void do_object(oop p) {
261 assert(_from_region != NULL, "must set before work");
262 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
263 assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked");
264
265 size_t obj_size = p->size() + BrooksPointer::word_size();
266 if (_compact_point + obj_size > _to_region->end()) {
267 finish_region();
268
269 // Object doesn't fit. Pick next empty region and start compacting there.
270 ShenandoahHeapRegion* new_to_region;
271 if (_empty_regions_pos < _empty_regions.length()) {
272 new_to_region = _empty_regions.at(_empty_regions_pos);
273 _empty_regions_pos++;
274 } else {
275 // Out of empty region? Compact within the same region.
276 new_to_region = _from_region;
277 }
278
279 assert(new_to_region != _to_region, "must not reuse same to-region");
280 assert(new_to_region != NULL, "must not be NULL");
281 _to_region = new_to_region;
282 _compact_point = _to_region->bottom();
283 }
318 AbstractGangTask("Shenandoah Prepare For Compaction Task"),
319 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) {
320 }
321
322 void work(uint worker_id) {
323 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id];
324 ShenandoahHeapRegion* from_region = next_from_region(slice);
325
326 // No work?
327 if (from_region == NULL) {
328 return;
329 }
330
331 // Sliding compaction. Walk all regions in the slice, and compact them.
332 // Remember empty regions and reuse them as needed.
333 ResourceMark rm;
334 GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());
335 ShenandoahPrepareForCompactionObjectClosure cl(empty_regions, from_region);
336 while (from_region != NULL) {
337 cl.set_from_region(from_region);
338 if (from_region->has_live()) {
339 _heap->marked_object_iterate(from_region, &cl);
340 }
341
342 // Compacted the region to somewhere else? From-region is empty then.
343 if (!cl.is_compact_same_region()) {
344 empty_regions.append(from_region);
345 }
346 from_region = next_from_region(slice);
347 }
348 cl.finish_region();
349
350 // Mark all remaining regions as empty
351 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
352 ShenandoahHeapRegion* r = empty_regions.at(pos);
353 r->set_new_top(r->bottom());
354 }
355 }
356 };
357
358 void ShenandoahMarkCompact::calculate_target_humongous_objects() {
359 ShenandoahHeap* heap = ShenandoahHeap::heap();
360
385 oop old_obj = oop(r->bottom() + BrooksPointer::word_size());
386 size_t words_size = old_obj->size() + BrooksPointer::word_size();
387 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
388
389 size_t start = to_end - num_regions;
390
391 if (start >= to_begin && start != r->region_number()) {
392 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
393 BrooksPointer::set_raw(old_obj, heap->get_region(start)->bottom() + BrooksPointer::word_size());
394 to_end = start;
395 continue;
396 }
397 }
398
399 // Failed to fit. Scan starting from current region.
400 to_begin = r->region_number();
401 to_end = r->region_number();
402 }
403 }
404
405 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
406 private:
407 ShenandoahHeap* const _heap;
408
409 public:
410 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
411 bool heap_region_do(ShenandoahHeapRegion* r) {
412 if (r->is_trash()) {
413 r->recycle();
414 }
415 if (r->is_cset()) {
416 r->make_regular_bypass();
417 }
418 if (r->is_empty_uncommitted()) {
419 r->make_committed_bypass();
420 }
421 assert (r->is_committed(), err_msg("only committed regions in heap now, see region " SIZE_FORMAT, r->region_number()));
422
423 // Record current region occupancy: this communicates empty regions are free
424 // to the rest of Full GC code.
425 r->set_new_top(r->top());
426 return false;
427 }
428 };
429
430 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure {
431 private:
432 ShenandoahHeap* const _heap;
433 ShenandoahMarkingContext* const _ctx;
434
435 public:
436 ShenandoahTrashImmediateGarbageClosure() :
437 _heap(ShenandoahHeap::heap()),
438 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
439
440 bool heap_region_do(ShenandoahHeapRegion* r) {
441 if (r->is_humongous_start()) {
442 oop humongous_obj = oop(r->bottom() + BrooksPointer::word_size());
443 if (!_ctx->is_marked(humongous_obj)) {
444 assert(!r->has_live(),
445 err_msg("Region " SIZE_FORMAT " is not marked, should not have live", r->region_number()));
446 _heap->trash_humongous_region_at(r);
447 } else {
448 assert(r->has_live(),
449 err_msg("Region " SIZE_FORMAT " should have live", r->region_number()));
450 }
451 } else if (r->is_humongous_continuation()) {
452 // If we hit continuation, the non-live humongous starts should have been trashed already
453 assert(r->humongous_start_region()->has_live(),
454 err_msg("Region " SIZE_FORMAT " should have live", r->region_number()));
455 } else if (r->is_regular()) {
456 if (!r->has_live()) {
457 assert(_ctx->is_bitmap_clear_range(r->bottom(), r->end()),
458 err_msg("Region " SIZE_FORMAT " should not have marks in bitmap", r->region_number()));
459 r->make_trash();
460 }
461 }
462 return false;
463 }
464 };
465
466 void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
467 ShenandoahHeap* heap = ShenandoahHeap::heap();
468 GCTraceTime time("Phase 2: Compute new object addresses", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
469 ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);
470
471 {
472 ShenandoahHeapLocker lock(heap->lock());
473
474 // Trash the immediately collectible regions before computing addresses
475 ShenandoahTrashImmediateGarbageClosure tigcl;
476 heap->heap_region_iterate(&tigcl, false, false);
477
478 // Make sure regions are in good state: committed, active, clean.
479 // This is needed because we are potentially sliding the data through them.
480 ShenandoahEnsureHeapActiveClosure ecl;
481 heap->heap_region_iterate(&ecl, false, false);
482 }
483
484 // Compute the new addresses for regular objects
485 {
486 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);
487 ShenandoahPrepareForCompactionTask prepare_task(worker_slices);
488 heap->workers()->run_task(&prepare_task);
489 }
490
491 // Compute the new addresses for humongous objects
492 {
493 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
494 calculate_target_humongous_objects();
495 }
496 }
497
498 class ShenandoahAdjustPointersClosure : public MetadataAwareOopClosure {
499 private:
500 ShenandoahHeap* const _heap;
501 ShenandoahMarkingContext* const _ctx;
502
503 template <class T>
504 inline void do_oop_work(T* p) {
505 T o = oopDesc::load_heap_oop(p);
506 if (! oopDesc::is_null(o)) {
507 oop obj = oopDesc::decode_heap_oop_not_null(o);
508 assert(_ctx->is_marked(obj), "must be marked");
509 oop forw = oop(BrooksPointer::get_raw(obj));
510 oopDesc::encode_store_heap_oop(p, forw);
511 }
512 }
513
514 public:
515 ShenandoahAdjustPointersClosure() :
516 _heap(ShenandoahHeap::heap()),
517 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
518
519 void do_oop(oop* p) { do_oop_work(p); }
520 void do_oop(narrowOop* p) { do_oop_work(p); }
521 };
522
523 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
524 private:
525 ShenandoahHeap* const _heap;
526 ShenandoahAdjustPointersClosure _cl;
527
528 public:
529 ShenandoahAdjustPointersObjectClosure() :
530 _heap(ShenandoahHeap::heap()) {
531 }
532 void do_object(oop p) {
533 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
534 p->oop_iterate(&_cl);
535 }
536 };
537
538 class ShenandoahAdjustPointersTask : public AbstractGangTask {
539 private:
540 ShenandoahHeap* const _heap;
541 ShenandoahRegionIterator _regions;
542
543 public:
544 ShenandoahAdjustPointersTask() :
545 AbstractGangTask("Shenandoah Adjust Pointers Task"),
546 _heap(ShenandoahHeap::heap()) {
547 }
548
549 void work(uint worker_id) {
550 ShenandoahAdjustPointersObjectClosure obj_cl;
551 ShenandoahHeapRegion* r = _regions.next();
552 while (r != NULL) {
553 if (!r->is_humongous_continuation() && r->has_live()) {
554 _heap->marked_object_iterate(r, &obj_cl);
555 }
556 r = _regions.next();
557 }
558 }
559 };
560
561 class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
562 private:
563 ShenandoahRootProcessor* _rp;
564
565 public:
566 ShenandoahAdjustRootPointersTask(ShenandoahRootProcessor* rp) :
567 AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
568 _rp(rp) {}
569
570 void work(uint worker_id) {
571 ShenandoahAdjustPointersClosure cl;
572 CLDToOopClosure adjust_cld_closure(&cl, true);
573 MarkingCodeBlobClosure adjust_code_closure(&cl,
589 {
590 COMPILER2_PRESENT(DerivedPointerTable::clear());
591 ShenandoahRootProcessor rp(heap, nworkers, ShenandoahPhaseTimings::full_gc_roots);
592 ShenandoahAdjustRootPointersTask task(&rp);
593 workers->run_task(&task);
594 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
595 }
596
597 ShenandoahAdjustPointersTask adjust_pointers_task;
598 workers->run_task(&adjust_pointers_task);
599 }
600
601 class ShenandoahCompactObjectsClosure : public ObjectClosure {
602 private:
603 ShenandoahHeap* const _heap;
604
605 public:
606 ShenandoahCompactObjectsClosure() : _heap(ShenandoahHeap::heap()) {}
607
608 void do_object(oop p) {
609 assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
610 size_t size = (size_t)p->size();
611 HeapWord* compact_to = BrooksPointer::get_raw(p);
612 HeapWord* compact_from = (HeapWord*) p;
613 if (compact_from != compact_to) {
614 Copy::aligned_conjoint_words(compact_from, compact_to, size);
615 }
616 oop new_obj = oop(compact_to);
617 BrooksPointer::initialize(new_obj);
618 }
619 };
620
621 class ShenandoahCompactObjectsTask : public AbstractGangTask {
622 private:
623 ShenandoahHeap* const _heap;
624 ShenandoahHeapRegionSet** const _worker_slices;
625
626 public:
627 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
628 AbstractGangTask("Shenandoah Compact Objects Task"),
629 _heap(ShenandoahHeap::heap()),
630 _worker_slices(worker_slices) {
631 }
632
633 void work(uint worker_id) {
634 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
635
636 ShenandoahCompactObjectsClosure cl;
637 ShenandoahHeapRegion* r = slice.next();
638 while (r != NULL) {
639 assert(!r->is_humongous(), "must not get humongous regions here");
640 if (r->has_live()) {
641 _heap->marked_object_iterate(r, &cl);
642 }
643 r->set_top(r->new_top());
644 r = slice.next();
645 }
646 }
647 };
648
649 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
650 private:
651 ShenandoahHeap* const _heap;
652 size_t _live;
653
654 public:
655 ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
656 _heap->free_set()->clear();
657 }
658
659 bool heap_region_do(ShenandoahHeapRegion* r) {
660 assert (!r->is_cset(), "cset regions should have been demoted already");
661
662 // Need to reset the complete-top-at-mark-start pointer here because
663 // the complete marking bitmap is no longer valid. This ensures
664 // size-based iteration in marked_object_iterate().
665 // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
666 // pinned regions.
667 if (!r->is_pinned()) {
668 _heap->complete_marking_context()->set_top_at_mark_start(r->region_number(), r->bottom());
669 }
670
671 size_t live = r->used();
672
673 // Make empty regions that have been allocated into regular
674 if (r->is_empty() && live > 0) {
675 r->make_regular_bypass();
676 }
677
678 // Reclaim regular regions that became empty
679 if (r->is_regular() && live == 0) {
680 r->make_trash();
681 }
682
683 // Recycle all trash regions
684 if (r->is_trash()) {
685 live = 0;
686 r->recycle();
687 }
688
767
// This is slightly different to ShHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
775 class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
776 private:
777 ShenandoahRegionIterator _regions;
778
779 public:
780 ShenandoahMCResetCompleteBitmapTask() :
781 AbstractGangTask("Parallel Reset Bitmap Task") {
782 }
783
784 void work(uint worker_id) {
785 ShenandoahHeapRegion* region = _regions.next();
786 ShenandoahHeap* heap = ShenandoahHeap::heap();
787 ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
788 while (region != NULL) {
789 if (heap->is_bitmap_slice_committed(region) && !region->is_pinned()) {
790 HeapWord* bottom = region->bottom();
791 HeapWord* top = ctx->top_at_mark_start(region->region_number());
792 if (top > bottom && region->has_live()) {
793 ctx->clear_bitmap(bottom, top);
794 }
795 assert(ctx->is_bitmap_clear_range(bottom, region->end()), "must be clear");
796 }
797 region = _regions.next();
798 }
799 }
800 };
801
802 void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
803 ShenandoahHeap* heap = ShenandoahHeap::heap();
804 GCTraceTime time("Phase 4: Move objects", ShenandoahLogDebug, _gc_timer, heap->tracer()->gc_id());
805 ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);
806
807 // Compact regular objects first
808 {
809 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
810 ShenandoahCompactObjectsTask compact_task(worker_slices);
811 heap->workers()->run_task(&compact_task);
812 }
813
814 // Compact humongous objects after regular object moves
815 {
816 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
817 compact_humongous_objects();
818 }
819
820 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
821 // and must ensure the bitmap is in sync.
822 {
823 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
824 ShenandoahMCResetCompleteBitmapTask task;
825 heap->workers()->run_task(&task);
826 }
827
828 // Bring regions in proper states after the collection, and set heap properties.
829 {
830 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);
831
832 ShenandoahHeapLocker lock(heap->lock());
833 ShenandoahPostCompactClosure post_compact;
834 heap->heap_region_iterate(&post_compact);
835 heap->set_used(post_compact.get_live());
836
837 heap->collection_set()->clear();
838 heap->free_set()->rebuild();
839 }
840
841 heap->clear_cancelled_gc();
842
843 // Also clear the next bitmap in preparation for next marking.
844 {
845 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_next);
846 heap->reset_next_mark_bitmap();
847 }
848 }
|