#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/space.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

class HeapRegion;

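// Archive-region bookkeeping: when the check is enabled, _archive_region_map
// records, at HeapRegion granularity, which parts of the heap hold archive
// objects that a full GC must leave in place.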
bool G1MarkSweep::_archive_check_enabled = false;
G1ArchiveRegionMap G1MarkSweep::_archive_region_map;

void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
                                      bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

#ifdef ASSERT
  if (G1CollectedHeap::heap()->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif
  // Hook up weak ref data so it can be used during Mark-Sweep.
  assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");

  GenMarkSweep::_ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  // When collecting the permanent generation, Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
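
// ... (remainder of invoke_at_safepoint and mark_sweep_phase1 elided; the
// fragment below is the tail of G1MarkSweep::mark_sweep_phase2()) ...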

  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());

  prepare_compaction();
}

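// Adjust all oops inside live objects to point at their post-compaction
// locations. Humongous objects are handled through their single
// starts-humongous region; archive regions are skipped because their
// contents never move.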
class G1AdjustPointersClosure: public HeapRegionClosure {
 public:
  bool doHeapRegion(HeapRegion* r) {
    if (r->is_humongous()) {
      if (r->is_starts_humongous()) {
        // We must adjust the pointers on the single H object.
        oop obj = oop(r->bottom());
        // Point all the oops to the new location.
        MarkSweep::adjust_pointers(obj);
      }
    } else if (!r->is_archive()) {
      // This really ought to be "as_CompactibleSpace"...
      r->adjust_pointers();
    }
    return false;
  }
};

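// Claims every object as live. Useful for weak-root walks during pointer
// adjustment that should visit all of their entries rather than clear any.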
class G1AlwaysTrueClosure: public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return true; }
};
static G1AlwaysTrueClosure always_true;

void G1MarkSweep::mark_sweep_phase3() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Adjust the pointers to reflect the new locations.
  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());

  // Need cleared claim bits for the roots processing
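  // ... (claim-bit clearing and the root and reference adjustment elided) ...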
  G1AdjustPointersClosure blk;
  g1h->heap_region_iterate(&blk);
}

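// Sweep/compact helper: slides live objects within each ordinary region to
// their new locations. Live humongous objects stay in place and only get
// their mark words reinitialized; archive regions are left untouched.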
class G1SpaceCompactClosure: public HeapRegionClosure {
 public:
  G1SpaceCompactClosure() {}

  bool doHeapRegion(HeapRegion* hr) {
    if (hr->is_humongous()) {
      if (hr->is_starts_humongous()) {
        oop obj = oop(hr->bottom());
        if (obj->is_gc_marked()) {
          obj->init_mark();
        } else {
          assert(hr->is_empty(), "Should have been cleared in phase 2.");
        }
        hr->reset_during_compaction();
      }
    } else if (!hr->is_archive()) {
      hr->compact();
    }
    return false;
  }
};

void G1MarkSweep::mark_sweep_phase4() {
  // All pointers are now adjusted, move objects accordingly.

  // The ValidateMarkSweep live oops tracking expects us to traverse spaces
  // in the same order in phase2, phase3 and phase4. We don't quite do that
  // here (code and comment not fixed for perm removal), so we tell the validate code
  // to use a higher index (saved from phase2) when verifying perm_gen.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());

  G1SpaceCompactClosure blk;
  g1h->heap_region_iterate(&blk);
}

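// Lazily set up the archive region map to cover the whole reserved heap,
// one flag per HeapRegion::GrainBytes-sized chunk.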
void G1MarkSweep::enable_archive_object_check() {
  if (!_archive_check_enabled) {
    _archive_check_enabled = true;
    size_t length = Universe::heap()->max_capacity();
    _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
                                   (HeapWord*)Universe::heap()->base() + length,
                                   (size_t) HeapRegion::GrainBytes);
  }
}

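// Flag every region-sized chunk that [start, end] touches as archive space.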
void G1MarkSweep::mark_range_archive(HeapWord* start, HeapWord* end) {
  assert(_archive_check_enabled, "archive range check not enabled");
  uint index = _archive_region_map.get_index_by_address(start);
  uint end_index = _archive_region_map.get_index_by_address(end);
  while (index <= end_index) {
    _archive_region_map.set_by_index(index++, true);
  }
}

bool G1MarkSweep::in_archive_range(oop object) {
  // This is the out-of-line part of the is_archive_object test, done
  // separately to avoid additional performance impact when the check is
  // not enabled.
  return _archive_region_map.get_by_address((HeapWord*)object);
}
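
// For context, a minimal sketch (assumed shape, not part of this file) of the
// inline fast path that pairs with in_archive_range: the cheap enabled-flag
// test stays inline and only calls out of line when archive checking is on.
//
//   inline bool MarkSweep::is_archive_object(oop object) {
//     return G1MarkSweep::archive_check_enabled() &&
//            G1MarkSweep::in_archive_range(object);
//   }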
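// Run the prepare-compaction closure over every region, then fix up the
// region sets to account for any humongous regions freed along the way.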
void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  g1h->heap_region_iterate(blk);
  blk->update_sets();
}

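// Free a humongous region whose object died. The dummy free list satisfies
// free_humongous_region's interface; it is emptied again immediately, since
// the real free list is rebuilt at the end of the GC (see update_sets).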
void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
  HeapWord* end = hr->end();
  FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");

  assert(hr->is_starts_humongous(),
         "Only the start of a humongous region should be freed.");

  hr->set_containing_set(NULL);
  _humongous_regions_removed.increment(1u, hr->capacity());

  _g1h->free_humongous_region(hr, &dummy_free_list, false /* par */);
  prepare_for_compaction(hr, end);
  dummy_free_list.remove_all();
}
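
// ... (G1PrepareCompactClosure::prepare_for_compaction helpers elided) ...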

void G1PrepareCompactClosure::update_sets() {
  // We'll recalculate total used bytes and recreate the free list
  // at the end of the GC, so no point in updating those values here.
  HeapRegionSetCount empty_set;
  _g1h->remove_from_old_sets(empty_set, _humongous_regions_removed);
}

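// First compaction pass: decide each region's fate. Live humongous objects
// are pinned by forwarding them to themselves, dead humongous objects free
// their regions, archive regions are left alone, and ordinary regions have
// new addresses computed for their live objects.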
bool G1PrepareCompactClosure::doHeapRegion(HeapRegion* hr) {
  if (hr->is_humongous()) {
    if (hr->is_starts_humongous()) {
      oop obj = oop(hr->bottom());
      if (obj->is_gc_marked()) {
        obj->forward_to(obj);
      } else {
        free_humongous_region(hr);
      }
    } else {
      assert(hr->is_continues_humongous(), "Invalid humongous.");
    }
  } else if (!hr->is_archive()) {
    prepare_for_compaction(hr, hr->end());
  }
  return false;
}