293 // here (code and comment not fixed for perm removal), so we tell the validate code
294 // to use a higher index (saved from phase2) when verifying perm_gen.
295 G1CollectedHeap* g1h = G1CollectedHeap::heap();
296
297 GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
298
299 G1SpaceCompactClosure blk;
300 g1h->heap_region_iterate(&blk);
301
302 }
303
304 void G1MarkSweep::enable_archive_object_check() {
305 assert(!_archive_check_enabled, "archive range check already enabled");
306 _archive_check_enabled = true;
307 size_t length = Universe::heap()->max_capacity();
308 _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
309 (HeapWord*)Universe::heap()->base() + length,
310 HeapRegion::GrainBytes);
311 }
312
// Record that every address in `range` belongs to an archive region, so that
// is_archive_object() answers true for objects inside it. Requires
// enable_archive_object_check() to have been called first.
313 void G1MarkSweep::mark_range_archive(MemRegion range) {
314   assert(_archive_check_enabled, "archive range check not enabled");
315   _archive_region_map.set_by_address(range, true);
316 }
317
318 bool G1MarkSweep::in_archive_range(oop object) {
319 // This is the out-of-line part of is_archive_object test, done separately
320 // to avoid additional performance impact when the check is not enabled.
321 return _archive_region_map.get_by_address((HeapWord*)object);
322 }
323
324 void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
325 G1CollectedHeap* g1h = G1CollectedHeap::heap();
326 g1h->heap_region_iterate(blk);
327 blk->update_sets();
328 }
329
330 void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
331 HeapWord* end = hr->end();
332 FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
333
334 assert(hr->is_starts_humongous(),
335 "Only the start of a humongous region should be freed.");
|
293 // here (code and comment not fixed for perm removal), so we tell the validate code
294 // to use a higher index (saved from phase2) when verifying perm_gen.
295 G1CollectedHeap* g1h = G1CollectedHeap::heap();
296
297 GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer(), gc_tracer()->gc_id());
298
299 G1SpaceCompactClosure blk;
300 g1h->heap_region_iterate(&blk);
301
302 }
303
// Turn on the archive-object membership check used by is_archive_object():
// initializes a per-region map covering the whole reserved heap at
// HeapRegion::GrainBytes granularity. Must be called at most once.
304 void G1MarkSweep::enable_archive_object_check() {
305   assert(!_archive_check_enabled, "archive range check already enabled");
306   _archive_check_enabled = true;
307   size_t length = Universe::heap()->max_capacity();
// NOTE(review): `length` is a byte count added to a HeapWord* — verify the
// intended units against the map's declaration; over-covering is harmless here.
308   _archive_region_map.initialize((HeapWord*)Universe::heap()->base(),
309                                  (HeapWord*)Universe::heap()->base() + length,
310                                  HeapRegion::GrainBytes);
311 }
312
// Record (or clear, when is_archive is false) the archive status of every
// region covered by `range` in the archive region map. Requires
// enable_archive_object_check() to have been called first.
313 void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
314   assert(_archive_check_enabled, "archive range check not enabled");
315   _archive_region_map.set_by_address(range, is_archive);
316 }
317
318 bool G1MarkSweep::in_archive_range(oop object) {
319   // This is the out-of-line part of is_archive_object test, done separately
320   // to avoid additional performance impact when the check is not enabled.
// Returns whether the object's address falls inside a range previously
// marked archive in _archive_region_map.
321   return _archive_region_map.get_by_address((HeapWord*)object);
322 }
323
// Apply the prepare-compaction closure to every heap region, then let the
// closure reconcile the region sets it accumulated while iterating.
324 void G1MarkSweep::prepare_compaction_work(G1PrepareCompactClosure* blk) {
325   G1CollectedHeap* g1h = G1CollectedHeap::heap();
326   g1h->heap_region_iterate(blk);
327   blk->update_sets();
328 }
329
330 void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
331 HeapWord* end = hr->end();
332 FreeRegionList dummy_free_list("Dummy Free List for G1MarkSweep");
333
334 assert(hr->is_starts_humongous(),
335 "Only the start of a humongous region should be freed.");
|