--- old/src/share/vm/gc/g1/g1Allocator.cpp	2015-08-11 11:49:05.415774935 -0400
+++ new/src/share/vm/gc/g1/g1Allocator.cpp	2015-08-11 11:49:04.835741593 -0400
@@ -275,7 +275,7 @@
   _max = _bottom + HeapRegion::min_region_size_in_words();

   // Tell mark-sweep that objects in this region are not to be marked.
-  G1MarkSweep::mark_range_archive(MemRegion(_bottom, HeapRegion::GrainWords));
+  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

   // Since we've modified the old set, call update_sizes.
   _g1h->g1mm()->update_sizes();
--- old/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-08-11 11:49:08.391946016 -0400
+++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp	2015-08-11 11:49:07.799911985 -0400
@@ -65,6 +65,7 @@
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
+#include "runtime/init.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -949,6 +950,7 @@
 }

 bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MutexLockerEx x(Heap_lock);
@@ -1037,12 +1039,13 @@
     }

     // Notify mark-sweep of the archive range.
-    G1MarkSweep::mark_range_archive(curr_range);
+    G1MarkSweep::set_range_archive(curr_range, true);
   }
   return true;
 }

 void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
   assert(ranges != NULL, "MemRegion array NULL");
   assert(count != 0, "No MemRegions provided");
   MemRegion reserved = _hrm.reserved();
@@ -1125,6 +1128,81 @@
   return result;
 }

+void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
+  assert(!is_init_completed(), "Expect to be called at JVM init time");
+  assert(ranges != NULL, "MemRegion array NULL");
+  assert(count != 0, "No MemRegions provided");
+  MemRegion reserved = _hrm.reserved();
+  HeapWord* prev_last_addr = NULL;
+  HeapRegion* prev_last_region = NULL;
+  size_t size_used = 0;
+  size_t uncommitted_regions = 0;
+
+  // For each MemRegion, free the G1 regions that constitute it, and
+  // notify mark-sweep that the range is no longer to be considered 'archive.'
+  MutexLockerEx x(Heap_lock);
+  for (size_t i = 0; i < count; i++) {
+    HeapWord* start_address = ranges[i].start();
+    HeapWord* last_address = ranges[i].last();
+
+    assert(reserved.contains(start_address) && reserved.contains(last_address),
+           err_msg("MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
+                   p2i(start_address), p2i(last_address)));
+    assert(start_address > prev_last_addr,
+           err_msg("Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT,
+                   p2i(start_address), p2i(prev_last_addr)));
+    size_used += ranges[i].byte_size();
+    prev_last_addr = last_address;
+
+    HeapRegion* start_region = _hrm.addr_to_region(start_address);
+    HeapRegion* last_region = _hrm.addr_to_region(last_address);
+
+    // Check for ranges that start in the same G1 region in which the previous
+    // range ended, and adjust the start address so we don't try to free
+    // the same region again. If the current range is entirely within that
+    // region, skip it.
+    if (start_region == prev_last_region) {
+      start_address = start_region->end();
+      if (start_address > last_address) {
+        continue;
+      }
+      start_region = _hrm.addr_to_region(start_address);
+    }
+    prev_last_region = last_region;
+
+    // After verifying that each region was marked as an archive region by
+    // alloc_archive_regions, set it free and empty and uncommit it.
+    HeapRegion* curr_region = start_region;
+    while (curr_region != NULL) {
+      guarantee(curr_region->is_archive(),
+                err_msg("Expected archive region at index %u", curr_region->hrm_index()));
+      uint curr_index = curr_region->hrm_index();
+      _old_set.remove(curr_region);
+      curr_region->set_free();
+      curr_region->set_top(curr_region->bottom());
+      if (curr_region != last_region) {
+        curr_region = _hrm.next_region_in_heap(curr_region);
+      } else {
+        curr_region = NULL;
+      }
+      _hrm.shrink_at(curr_index);
+      uncommitted_regions++;
+    }
+
+    // Notify mark-sweep that this is no longer an archive range.
+    G1MarkSweep::set_range_archive(ranges[i], false);
+  }
+
+  if (uncommitted_regions != 0) {
+    ergo_verbose1(ErgoHeapSizing,
+                  "attempt heap shrinking",
+                  ergo_format_reason("uncommitted archive regions")
+                  ergo_format_byte("total size"),
+                  HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
+  }
+  decrease_used(size_used);
+}
+
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
                                                         uint* gclocker_retry_count_ret) {
--- old/src/share/vm/gc/g1/g1CollectedHeap.hpp	2015-08-11 11:49:11.552127675 -0400
+++ new/src/share/vm/gc/g1/g1CollectedHeap.hpp	2015-08-11 11:49:10.948092953 -0400
@@ -751,6 +751,12 @@
   // alloc_archive_regions, and after class loading has occurred.
   void fill_archive_regions(MemRegion* range, size_t count);

+  // For each of the specified MemRegions, uncommit the containing G1 regions
+  // that were allocated by alloc_archive_regions. Call this instead of
+  // fill_archive_regions at JVM init time if the archive file mapping failed,
+  // passing the same sorted, non-overlapping MemRegion array.
+  void dealloc_archive_regions(MemRegion* range, size_t count);
+
 protected:

   // Shrink the garbage-first heap by at most the given size (in bytes!).
--- old/src/share/vm/gc/g1/g1MarkSweep.cpp	2015-08-11 11:49:14.664306573 -0400
+++ new/src/share/vm/gc/g1/g1MarkSweep.cpp	2015-08-11 11:49:14.064272081 -0400
@@ -310,9 +310,9 @@
                                    HeapRegion::GrainBytes);
 }

-void G1MarkSweep::mark_range_archive(MemRegion range) {
+void G1MarkSweep::set_range_archive(MemRegion range, bool is_archive) {
   assert(_archive_check_enabled, "archive range check not enabled");
-  _archive_region_map.set_by_address(range, true);
+  _archive_region_map.set_by_address(range, is_archive);
 }

 bool G1MarkSweep::in_archive_range(oop object) {
--- old/src/share/vm/gc/g1/g1MarkSweep.hpp	2015-08-11 11:49:18.000498346 -0400
+++ new/src/share/vm/gc/g1/g1MarkSweep.hpp	2015-08-11 11:49:17.396463623 -0400
@@ -58,8 +58,8 @@
   // Create the _archive_region_map which is used to identify archive objects.
   static void enable_archive_object_check();

-  // Mark the regions containing the specified address range as archive regions.
-  static void mark_range_archive(MemRegion range);
+  // Mark the regions containing the specified address range as archive or non-archive.
+  static void set_range_archive(MemRegion range, bool is_archive);

   // Check if an object is in an archive region using the _archive_region_map.
   static bool in_archive_range(oop object);
--- old/src/share/vm/gc/g1/heapRegionManager.cpp	2015-08-11 11:49:21.052673794 -0400
+++ new/src/share/vm/gc/g1/heapRegionManager.cpp	2015-08-11 11:49:20.460639762 -0400
@@ -437,6 +437,13 @@
   return removed;
 }

+void HeapRegionManager::shrink_at(uint index) {
+  assert(is_available(index), err_msg("Expected available region at index %u", index));
+  HeapRegion* curr_region = _regions.get_by_index(index);
+  assert(curr_region->is_free(), err_msg("Expected free region at index %u", index));
+  uncommit_regions(index, 1);
+}
+
 uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
   guarantee(start_idx < _allocated_heapregions_length, "checking");
   guarantee(res_idx != NULL, "checking");
--- old/src/share/vm/gc/g1/heapRegionManager.hpp	2015-08-11 11:49:24.120850161 -0400
+++ new/src/share/vm/gc/g1/heapRegionManager.hpp	2015-08-11 11:49:23.520815670 -0400
@@ -241,6 +241,9 @@
   // Return the actual number of uncommitted regions.
   uint shrink_by(uint num_regions_to_remove);

+  // Uncommit the region at the specified index, which must be available and free.
+  void shrink_at(uint index);
+
   void verify(); // Do some sanity checking.
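
The new entry point completes the init-time protocol for archive regions: allocate, then either fill (the archive mapping succeeded) or deallocate (it failed). Below is a minimal standalone sketch of that call sequence; ArchiveHeap, try_map_archive, and the backing buffers are hypothetical stand-ins for illustration, not HotSpot code.

// Sketch only: the init-time protocol around the new dealloc_archive_regions.
// ArchiveHeap, try_map_archive, and the backing buffers are hypothetical.
#include <cstddef>
#include <cstdio>

struct MemRegion {
  char*  _start;
  size_t _byte_size;
};

struct ArchiveHeap {
  bool alloc_archive_regions(MemRegion*, size_t) { return true; }  // reserve + pin
  void fill_archive_regions(MemRegion*, size_t) {                  // mapping worked
    std::puts("archive regions filled");
  }
  void dealloc_archive_regions(MemRegion*, size_t) {               // mapping failed
    std::puts("archive regions uncommitted");
  }
};

// Stand-in for mapping the CDS archive file over the allocated ranges.
static bool try_map_archive(MemRegion*, size_t) { return false; }

int main() {
  ArchiveHeap heap;
  static char backing[2][4096];   // ascending and non-overlapping, as required
  MemRegion ranges[2] = { { backing[0], sizeof backing[0] },
                          { backing[1], sizeof backing[1] } };

  if (!heap.alloc_archive_regions(ranges, 2)) {
    return 1;
  }
  if (try_map_archive(ranges, 2)) {
    heap.fill_archive_regions(ranges, 2);     // keep the regions as archive
  } else {
    heap.dealloc_archive_regions(ranges, 2);  // give the memory back to the heap
  }
  return 0;
}

The point of the failure path is that a failed mapping would otherwise leave pinned, never-used archive regions in the old set; uncommitting them returns the memory and, via decrease_used, keeps the heap accounting accurate.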
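
The subtle part of the dealloc loop is the prev_last_region bookkeeping: two consecutive sorted ranges may meet inside a single G1 region, and that region must be freed exactly once. Here is a self-contained sketch of the same adjustment, reduced to region indices over an assumed fixed region size; Range, regions_to_free, and the sample values are illustrative, not HotSpot code.

// Sketch only: the prev_last_region bookkeeping from dealloc_archive_regions,
// reduced to region indices.
#include <cstddef>
#include <cstdio>
#include <vector>

struct Range { size_t start; size_t last; };  // inclusive byte offsets

// Free every region covered by the sorted, non-overlapping ranges exactly
// once, even when one range ends and the next begins inside the same region.
static std::vector<size_t> regions_to_free(const std::vector<Range>& ranges,
                                           size_t region_size) {
  std::vector<size_t> freed;
  long prev_last_region = -1;
  for (const Range& r : ranges) {
    size_t start_region = r.start / region_size;
    size_t last_region  = r.last  / region_size;
    // Mirrors the patch: if this range starts in the region the previous
    // range ended in, step past that region; if nothing remains, skip the
    // whole range (its only region was already freed).
    if ((long)start_region == prev_last_region) {
      start_region++;
      if (start_region > last_region) {
        continue;
      }
    }
    prev_last_region = (long)last_region;
    for (size_t i = start_region; i <= last_region; i++) {
      freed.push_back(i);  // the patch calls _hrm.shrink_at(i) here
    }
  }
  return freed;
}

int main() {
  // The two ranges meet inside region 2 (region size 1024), so region 2
  // must be freed only once.
  std::vector<Range> ranges = { {0, 2500}, {2501, 3500} };
  for (size_t idx : regions_to_free(ranges, 1024)) {
    std::printf("free region %zu\n", idx);  // prints 0, 1, 2, 3
  }
  return 0;
}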
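
The rename from mark_range_archive to set_range_archive reflects that the per-region archive map can now be both set and cleared through one call. A rough sketch of such a map follows, assuming one flag per fixed-size region; the real _archive_region_map is an address-biased array inside G1MarkSweep, and the ArchiveRegionMap class below is an illustrative stand-in only.

// Sketch only: the set_range_archive / in_archive_range pairing, assuming
// one bool per fixed-size region. The flat vector and names are illustrative.
#include <cassert>
#include <cstddef>
#include <vector>

class ArchiveRegionMap {
  std::vector<bool> _is_archive;  // one flag per region
  char*  _heap_base;
  size_t _region_size;
 public:
  ArchiveRegionMap(char* heap_base, size_t heap_size, size_t region_size)
    : _is_archive(heap_size / region_size, false),
      _heap_base(heap_base), _region_size(region_size) {}

  // Analogue of G1MarkSweep::set_range_archive: flag every region the
  // range touches as archive (true) or ordinary (false).
  void set_by_address(char* start, char* last, bool is_archive) {
    for (size_t i = index_of(start); i <= index_of(last); i++) {
      _is_archive[i] = is_archive;
    }
  }

  // Analogue of G1MarkSweep::in_archive_range for an object address.
  bool in_archive_range(char* addr) const {
    return _is_archive[index_of(addr)];
  }

 private:
  size_t index_of(char* addr) const {
    return (size_t)(addr - _heap_base) / _region_size;
  }
};

int main() {
  static char heap[8 * 1024];
  ArchiveRegionMap map(heap, sizeof heap, 1024);
  map.set_by_address(heap, heap + 2047, true);   // mark regions 0..1 archive
  assert(map.in_archive_range(heap + 100));
  map.set_by_address(heap, heap + 2047, false);  // the new 'false' path: clear them
  assert(!map.in_archive_range(heap + 100));
  return 0;
}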