106 }
// NOTE(review): fragment begins mid-function. The code above (not visible here)
// appears to be earlier steps (a/b) of winding down an in-flight concurrent
// cycle before running the Full GC -- confirm against the enclosing function.
107 assert(!heap->is_concurrent_mark_in_progress(), "sanity");
108
109 // c. Reset the bitmaps for new marking
110 heap->reset_mark_bitmap();
111 assert(heap->marking_context()->is_bitmap_clear(), "sanity");
112 assert(!heap->marking_context()->is_complete(), "sanity");
113
114 // d. Abandon reference discovery and clear all discovered references.
115 ReferenceProcessor* rp = heap->ref_processor();
116 rp->disable_discovery();
117 rp->abandon_partial_discovery();
118 rp->verify_no_references_recorded();
119
120 // e. Set back forwarded objects bit back, in case some steps above dropped it.
121 heap->set_has_forwarded_objects(has_forwarded_objects);
122 }
123
// Make the heap walkable before marking/compaction touches it.
124 heap->make_parsable(true);
125
126 CodeCache::gc_prologue();
127
// Publish all state/flag updates above before phase 1 starts.
128 OrderAccess::fence();
129
130 phase1_mark_heap();
131
132 // Once marking is done, which may have fixed up forwarded objects, we can drop it.
133 // Coming out of Full GC, we would not have any forwarded objects.
134 // This also prevents read barrier from kicking in while adjusting pointers in phase3.
135 heap->set_has_forwarded_objects(false);
136
137 heap->set_full_gc_move_in_progress(true);
138
139 // Setup workers for the rest
140 OrderAccess::fence();
141
142 // Initialize worker slices
// One region set per worker: each worker gets its own slice of regions to
// compact into, sized by the maximum worker count.
143 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
144 for (uint i = 0; i < heap->max_workers(); i++) {
145 worker_slices[i] = new ShenandoahHeapRegionSet();
146 }
147
// NOTE(review): embedded numbering jumps 147 -> 151 here, so the opening of
// this scope (lines 148-150 of the original) is not visible in this fragment.
// The RAII locker below holds the heap lock across phases 2-4 and releases it
// at the closing brace after phase4.
151 ShenandoahHeapLocker lock(heap->lock());
152
153 phase2_calculate_target_addresses(worker_slices);
154
155 OrderAccess::fence();
156
157 phase3_update_references();
158
159 phase4_compact_objects(worker_slices);
160 }
161
162 // Resize metaspace
163 MetaspaceGC::compute_new_size();
164
165 // Free worker slices
// Matches the NEW_C_HEAP_ARRAY + per-slot 'new' allocations above.
166 for (uint i = 0; i < heap->max_workers(); i++) {
167 delete worker_slices[i];
168 }
169 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
170
171 CodeCache::gc_epilogue();
172 JvmtiExport::gc_epilogue();
173
// Clear the Full GC flags now that moving/compaction is finished.
174 heap->set_full_gc_move_in_progress(false);
175 heap->set_full_gc_in_progress(false);
176
// Optional post-GC verification, gated by the Shenandoah-specific and the
// generic VM flags respectively.
177 if (ShenandoahVerify) {
178 heap->verifier()->verify_after_fullgc();
179 }
180
181 if (VerifyAfterGC) {
182 Universe::verify();
183 }
184
// Timed scope: post-Full-GC heap dump accounting.
185 {
186 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
187 heap->post_full_gc_dump(_gc_timer);
188 }
189 }
190
191 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
|
106 }
// NOTE(review): fragment begins mid-function; this is a second, near-duplicate
// copy of the Full GC driver tail seen earlier in this paste. Unlike that copy,
// this variant omits the CodeCache::gc_prologue()/gc_epilogue() calls --
// confirm which version is current before relying on either.
107 assert(!heap->is_concurrent_mark_in_progress(), "sanity");
108
109 // c. Reset the bitmaps for new marking
110 heap->reset_mark_bitmap();
111 assert(heap->marking_context()->is_bitmap_clear(), "sanity");
112 assert(!heap->marking_context()->is_complete(), "sanity");
113
114 // d. Abandon reference discovery and clear all discovered references.
115 ReferenceProcessor* rp = heap->ref_processor();
116 rp->disable_discovery();
117 rp->abandon_partial_discovery();
118 rp->verify_no_references_recorded();
119
120 // e. Set back forwarded objects bit back, in case some steps above dropped it.
121 heap->set_has_forwarded_objects(has_forwarded_objects);
122 }
123
// Make the heap walkable before marking/compaction touches it.
124 heap->make_parsable(true);
125
// Publish all state/flag updates above before phase 1 starts.
126 OrderAccess::fence();
127
128 phase1_mark_heap();
129
130 // Once marking is done, which may have fixed up forwarded objects, we can drop it.
131 // Coming out of Full GC, we would not have any forwarded objects.
132 // This also prevents read barrier from kicking in while adjusting pointers in phase3.
133 heap->set_has_forwarded_objects(false);
134
135 heap->set_full_gc_move_in_progress(true);
136
137 // Setup workers for the rest
138 OrderAccess::fence();
139
140 // Initialize worker slices
// One region set per worker: each worker gets its own slice of regions to
// compact into, sized by the maximum worker count.
141 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
142 for (uint i = 0; i < heap->max_workers(); i++) {
143 worker_slices[i] = new ShenandoahHeapRegionSet();
144 }
145
// NOTE(review): embedded numbering jumps 145 -> 149 here, so the opening of
// this scope (lines 146-148 of the original) is not visible in this fragment.
// The RAII locker below holds the heap lock across phases 2-4 and releases it
// at the closing brace after phase4.
149 ShenandoahHeapLocker lock(heap->lock());
150
151 phase2_calculate_target_addresses(worker_slices);
152
153 OrderAccess::fence();
154
155 phase3_update_references();
156
157 phase4_compact_objects(worker_slices);
158 }
159
160 // Resize metaspace
161 MetaspaceGC::compute_new_size();
162
163 // Free worker slices
// Matches the NEW_C_HEAP_ARRAY + per-slot 'new' allocations above.
164 for (uint i = 0; i < heap->max_workers(); i++) {
165 delete worker_slices[i];
166 }
167 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);
168
169 JvmtiExport::gc_epilogue();
170
// Clear the Full GC flags now that moving/compaction is finished.
171 heap->set_full_gc_move_in_progress(false);
172 heap->set_full_gc_in_progress(false);
173
// Optional post-GC verification, gated by the Shenandoah-specific and the
// generic VM flags respectively.
174 if (ShenandoahVerify) {
175 heap->verifier()->verify_after_fullgc();
176 }
177
178 if (VerifyAfterGC) {
179 Universe::verify();
180 }
181
// Timed scope: post-Full-GC heap dump accounting.
182 {
183 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps);
184 heap->post_full_gc_dump(_gc_timer);
185 }
186 }
187
188 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
|