--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2015-03-18 13:56:55.076343711 +0100
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2015-03-18 13:56:54.997341363 +0100
@@ -1835,26 +1835,18 @@
 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
                                                                  size_t size,
                                                                  size_t translation_factor) {
-  // Determine the preferred page size for the auxiliary data structures. We always
-  // prefer large pages if the given size allows it for performance reasons.
-  size_t const commit_size = os::page_size_for_region_unaligned(size, 1);
-  // The base address reserved space must be aligned to that page. Otherwise we
-  // would need to split pages (or it would be completely impossible) when
-  // uncommitting memory within the heap.
-  // Size need *not* be aligned to above calculated commit size.
-  size_t const alignment = MAX2(commit_size, (size_t)os::vm_allocation_granularity());
-  bool const use_large_pages = commit_size != (size_t)os::vm_page_size() ? UseLargePages : false;
-  ReservedSpace rs(align_size_up(size, alignment), alignment, use_large_pages);
+  // Allocate a new reserved space, preferring to use large pages.
+  ReservedSpace rs(size, true);
   G1RegionToSpaceMapper* result =
     G1RegionToSpaceMapper::create_mapper(rs,
                                          size,
-                                         commit_size,
+                                         rs.alignment(),
                                          HeapRegion::GrainBytes,
                                          translation_factor,
                                          mtGC);
   if (TracePageSizes) {
-    gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT" base=" PTR_FORMAT" size=" SIZE_FORMAT" alignment=" SIZE_FORMAT" reqsize=" SIZE_FORMAT,
-                           description, commit_size, p2i(rs.base()), rs.size(), rs.alignment(), size);
+    gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT " size=" SIZE_FORMAT " alignment=" SIZE_FORMAT " reqsize=" SIZE_FORMAT,
+                           description, rs.alignment(), p2i(rs.base()), rs.size(), rs.alignment(), size);
   }
   return result;
 }
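The hunk above delegates page-size selection to the new ReservedSpace(size, true) constructor and then hands rs.alignment() to the mapper as its page size. The following standalone sketch (not HotSpot code; the 4 KB / 2 MB / 64 KB constants and all names are assumptions made for illustration) models how a requested auxiliary-structure size turns into a page size and an aligned reservation under that scheme:

    // Illustrative model only -- not HotSpot code.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static const size_t kSmallPage   = 4 * 1024;         // assumed small OS page size
    static const size_t kLargePage   = 2 * 1024 * 1024;  // assumed large page size
    static const size_t kGranularity = 64 * 1024;        // assumed allocation granularity

    static size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t requests[] = { 512 * 1024, 16 * 1024 * 1024 };
      for (size_t size : requests) {
        // Prefer large pages only when the request is at least one large page.
        size_t page_size = (size >= kLargePage) ? kLargePage : kSmallPage;
        // The reservation alignment (what rs.alignment() would report, and what the
        // mapper is handed as its page size) is the larger of page size and granularity.
        size_t alignment = std::max(page_size, kGranularity);
        std::printf("request=%zu page_size=%zu alignment=%zu reserved=%zu\n",
                    size, page_size, alignment, align_up(size, alignment));
      }
      return 0;
    }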
--- old/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	2015-03-18 13:56:55.517356820 +0100
+++ new/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	2015-03-18 13:56:55.450354828 +0100
@@ -44,41 +44,41 @@
 #endif
 #include "utilities/bitMap.inline.hpp"
 
-G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL),
-  _high_boundary(NULL), _committed(), _commit_size(0), _special(false),
+G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size) :
+  _low_boundary(NULL), _high_boundary(NULL), _committed(), _page_size(0), _special(false),
   _dirty(), _executable(false) {
+  initialize_with_page_size(rs, used_size, page_size);
 }
 
-bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t actual_size, size_t commit_size) {
-  if (!rs.is_reserved()) {
-    return false;  // Allocation failed.
-  }
-  assert(_low_boundary == NULL, "VirtualSpace already initialized");
-  assert(commit_size > 0, "Granularity must be non-zero.");
-
-  guarantee(is_ptr_aligned(rs.base(), commit_size),
-            err_msg("Reserved space base " PTR_FORMAT" is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), commit_size));
-  guarantee(is_size_aligned(actual_size, os::vm_page_size()),
-            err_msg("Given actual reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), actual_size));
-  guarantee(actual_size <= rs.size(),
-            err_msg("Actual size of reserved space " SIZE_FORMAT" bytes is smaller than reservation at " SIZE_FORMAT" bytes", actual_size, rs.size()));
+void G1PageBasedVirtualSpace::initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size) {
+  guarantee(rs.is_reserved(), "Given reserved space must have been reserved already.");
+
+  vmassert(_low_boundary == NULL, "VirtualSpace already initialized");
+  vmassert(page_size > 0, "Page size must be non-zero.");
+
+  guarantee(is_ptr_aligned(rs.base(), page_size),
+            err_msg("Reserved space base " PTR_FORMAT " is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), page_size));
+  guarantee(is_size_aligned(used_size, os::vm_page_size()),
+            err_msg("Given used reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), used_size));
+  guarantee(used_size <= rs.size(),
+            err_msg("Used size of reserved space " SIZE_FORMAT " bytes is smaller than reservation at " SIZE_FORMAT " bytes", used_size, rs.size()));
+  guarantee(is_size_aligned(rs.size(), page_size),
+            err_msg("Expected that the virtual space is size aligned, but " SIZE_FORMAT " is not aligned to page size " SIZE_FORMAT, rs.size(), page_size));
 
   _low_boundary  = rs.base();
-  _high_boundary = _low_boundary + actual_size;
+  _high_boundary = _low_boundary + used_size;
 
   _special = rs.special();
   _executable = rs.executable();
 
-  _commit_size = commit_size;
+  _page_size = page_size;
 
-  assert(_committed.size() == 0, "virtual space initialized more than once");
-  BitMap::idx_t size_in_commit_pages = round_to(rs.size(), commit_size) / commit_size;
-  _committed.resize(size_in_commit_pages, /* in_resource_area */ false);
+  vmassert(_committed.size() == 0, "virtual space initialized more than once");
+  BitMap::idx_t size_in_pages = rs.size() / page_size;
+  _committed.resize(size_in_pages, /* in_resource_area */ false);
   if (_special) {
-    _dirty.resize(size_in_commit_pages, /* in_resource_area */ false);
+    _dirty.resize(size_in_pages, /* in_resource_area */ false);
   }
-
-  return true;
 }
 
 G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() {
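The initialization above now requires the reservation size to be aligned to the chosen page size, while the used size only has to be OS-page aligned; the next hunk's committed_size() therefore subtracts the unused part of a partially used last page. A minimal standalone model of that accounting (illustrative only, with assumed sizes; not HotSpot code):

    // Illustrative model only -- not HotSpot code.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      const size_t page_size = 2 * 1024 * 1024;   // assumed preferred page size
      const size_t used_size = 5 * 1024 * 1024;   // high boundary; not page aligned here

      // Three pages cover 5 MB of used space; say all of them are currently committed.
      std::vector<bool> committed(3, true);

      size_t ones = 0;
      for (bool bit : committed) ones += bit ? 1 : 0;

      size_t result = ones * page_size;
      // The last page may extend past the used part of the reservation; if that
      // page is committed, subtract the bytes beyond the high boundary.
      const bool last_page_partial = (used_size % page_size) != 0;
      if (last_page_partial && committed.back()) {
        result -= page_size - (used_size % page_size);
      }
      std::printf("committed bytes = %zu\n", result);  // prints 5242880
      return 0;
    }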
@@ -92,16 +92,17 @@
   _high_boundary = NULL;
   _special = false;
   _executable = false;
-  _commit_size = 0;
+  _page_size = 0;
   _committed.resize(0, false);
   _dirty.resize(0, false);
 }
 
 size_t G1PageBasedVirtualSpace::committed_size() const {
-  size_t result = _committed.count_one_bits() * _commit_size;
+  size_t result = _committed.count_one_bits() * _page_size;
   // The last page might not be in full.
-  if (_committed.at(_committed.size()-1)) {
-    result -= pointer_delta((char*)align_ptr_up(_high_boundary, _commit_size), _high_boundary, sizeof(char));
+  if (_committed.at(_committed.size() - 1)) {
+    char* aligned_high_boundary = (char*)align_ptr_up(_high_boundary, _page_size);
+    result -= pointer_delta(aligned_high_boundary, _high_boundary, sizeof(char));
   }
   return result;
 }
@@ -114,103 +115,131 @@
   return reserved_size() - committed_size();
 }
 
-uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
-  return (addr - _low_boundary) / _commit_size;
+size_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const {
+  return (addr - _low_boundary) / _page_size;
 }
 
-bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const {
-  uintptr_t end = start + size_in_pages;
+bool G1PageBasedVirtualSpace::is_area_committed(size_t start, size_t size_in_pages) const {
+  size_t end = start + size_in_pages;
   return _committed.get_next_zero_offset(start, end) >= end;
 }
 
-bool G1PageBasedVirtualSpace::is_area_uncommitted(uintptr_t start, size_t size_in_pages) const {
-  uintptr_t end = start + size_in_pages;
+bool G1PageBasedVirtualSpace::is_area_uncommitted(size_t start, size_t size_in_pages) const {
+  size_t end = start + size_in_pages;
   return _committed.get_next_one_offset(start, end) >= end;
 }
 
-char* G1PageBasedVirtualSpace::page_start(uintptr_t index) {
-  return _low_boundary + index * _commit_size;
+char* G1PageBasedVirtualSpace::page_start(size_t index) {
+  return _low_boundary + index * _page_size;
+}
+
+bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) {
+  guarantee(index <= _committed.size(),
+            err_msg("Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size()));
+  return index == _committed.size();
+}
+
+void G1PageBasedVirtualSpace::commit_full_pages(size_t start, size_t num_pages) {
+  vmassert(num_pages > 0, "No full pages to commit");
+  vmassert(start + num_pages <= _committed.size(),
+           err_msg("Tried to commit area from page " SIZE_FORMAT " to page " SIZE_FORMAT " "
+                   "that is outside of managed space of " SIZE_FORMAT " pages",
+                   start, start + num_pages, _committed.size()));
+
+  char* start_addr = page_start(start);
+  size_t size = num_pages * _page_size;
+
+  os::commit_memory_or_exit(start_addr, size, _page_size, _executable,
+                            err_msg("Failed to commit area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+                                    p2i(start_addr), p2i(start_addr + size), size));
+}
+
+void G1PageBasedVirtualSpace::commit_tail() {
+  char* const aligned_end_address = (char*)align_ptr_down(_high_boundary, _page_size);
+  size_t const tail_size = pointer_delta(_high_boundary, aligned_end_address, sizeof(char));
+
+  os::commit_memory_or_exit(aligned_end_address, tail_size, os::vm_page_size(), _executable,
+                            err_msg("Failed to commit tail area from " PTR_FORMAT " to " PTR_FORMAT " of length " SIZE_FORMAT ".",
+                                    p2i(aligned_end_address), p2i(_high_boundary), tail_size));
 }
 
-char* G1PageBasedVirtualSpace::page_end(uintptr_t index) {
-  guarantee(index < _committed.size(), "invariant");
-  if (index != (_committed.size() - 1)) {
-    return page_start(index + 1);
-  }
-  return _high_boundary;
-}
-
-void G1PageBasedVirtualSpace::commit_internal(char* start, char* end) {
-  guarantee(start >= _low_boundary && start < _high_boundary,
-            err_msg("Start address " PTR_FORMAT" is outside of reserved space.", p2i(start)));
-  guarantee(is_ptr_aligned(start, _commit_size),
-            err_msg("Start address should be aligned to commit size " SIZE_FORMAT" but got " PTR_FORMAT".",
-                    _commit_size, p2i(start)));
-
-  guarantee(end >= _low_boundary && end <= _high_boundary,
-            err_msg("End address " PTR_FORMAT" is outside of reserved space.", p2i(end)));
-  bool is_high_aligned_to_commit_size = is_ptr_aligned(_high_boundary, _commit_size);
-  guarantee(is_ptr_aligned(end, is_high_aligned_to_commit_size ? _commit_size : os::vm_page_size()),
-            err_msg("End address should be aligned to page size " SIZE_FORMAT" but got " PTR_FORMAT".",
-                    is_high_aligned_to_commit_size ? _commit_size : os::vm_page_size(),
-                    p2i(end)));
-  // First try to commit in commit_size chunks.
-  char* const aligned_end_address = (char*)align_ptr_down(end, _commit_size);
-  size_t const size = pointer_delta(aligned_end_address, start, sizeof(char));
-  if (size != 0) {
-    os::commit_memory_or_exit(start, size, _commit_size, _executable,
-                              err_msg("Failed to commit area from " PTR_FORMAT" to " PTR_FORMAT" of length " SIZE_FORMAT".",
-                                      p2i(start), p2i(aligned_end_address), size));
-  }
-  // Finally, commit any remaining tail.
-  if (end != aligned_end_address) {
-    size_t const tail_size = pointer_delta(end, aligned_end_address, sizeof(char));
-    guarantee(tail_size < _commit_size,
-              err_msg("Remaining size " SIZE_FORMAT "must be smaller than commit size of " SIZE_FORMAT, tail_size, _commit_size));
-    os::commit_memory_or_exit(start, tail_size, _executable,
-                              err_msg("Failed to commit remainder pages from " PTR_FORMAT" to " PTR_FORMAT" of length "SIZE_FORMAT".",
-                                      p2i(aligned_end_address), p2i(end), tail_size));
+void G1PageBasedVirtualSpace::commit_internal(size_t start_page, size_t end_page) {
+  guarantee(start_page < end_page,
+            err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+  guarantee(end_page <= _committed.size(),
+            err_msg("Given end page " SIZE_FORMAT " is beyond end of managed page amount of " SIZE_FORMAT, end_page, _committed.size()));
+
+  size_t pages = end_page - start_page;
+  bool need_to_commit_tail = is_after_last_page(end_page) && is_last_page_partial();
+
+  // If we have to commit some (partial) tail area, decrease the amount of pages to avoid
+  // committing that in the full-page commit code.
+  if (need_to_commit_tail) {
+    pages--;
+  }
+
+  if (pages > 0) {
+    commit_full_pages(start_page, pages);
+  }
+
+  if (need_to_commit_tail) {
+    commit_tail();
   }
 }
 
-bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) {
+char* G1PageBasedVirtualSpace::bounded_end_addr(size_t end_page) {
+  return MIN2(_high_boundary, page_start(end_page));
+}
+
+void G1PageBasedVirtualSpace::pretouch_internal(size_t start_page, size_t end_page) {
+  guarantee(start_page < end_page,
+            err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+
+  os::pretouch_memory(page_start(start_page), bounded_end_addr(end_page));
+}
+
+bool G1PageBasedVirtualSpace::commit(size_t start, size_t size_in_pages) {
   // We need to make sure to commit all pages covered by the given area.
   guarantee(is_area_uncommitted(start, size_in_pages), "Specified area is not uncommitted");
 
   bool zero_filled = true;
-  uintptr_t end = start + size_in_pages;
+  size_t end = start + size_in_pages;
 
   if (_special) {
     // Check for dirty pages and update zero_filled if any found.
-    if (_dirty.get_next_one_offset(start,end) < end) {
+    if (_dirty.get_next_one_offset(start, end) < end) {
       zero_filled = false;
       _dirty.clear_range(start, end);
     }
   } else {
-    commit_internal(page_start(start), page_end(end - 1));
+    commit_internal(start, end);
   }
   _committed.set_range(start, end);
 
   if (AlwaysPreTouch) {
-    os::pretouch_memory(page_start(start), page_end(end - 1));
+    pretouch_internal(start, end);
   }
   return zero_filled;
 }
 
-void G1PageBasedVirtualSpace::uncommit_internal(char* start, char* end) {
-  os::uncommit_memory(start, pointer_delta(end, start, sizeof(char)));
+void G1PageBasedVirtualSpace::uncommit_internal(size_t start_page, size_t end_page) {
+  guarantee(start_page < end_page,
+            err_msg("Given start page " SIZE_FORMAT " is larger or equal to end page " SIZE_FORMAT, start_page, end_page));
+
+  char* start_addr = page_start(start_page);
+  os::uncommit_memory(start_addr, pointer_delta(bounded_end_addr(end_page), start_addr, sizeof(char)));
 }
 
-void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) {
+void G1PageBasedVirtualSpace::uncommit(size_t start, size_t size_in_pages) {
   guarantee(is_area_committed(start, size_in_pages), "checking");
 
-  uintptr_t end = start + size_in_pages;
+  size_t end = start + size_in_pages;
   if (_special) {
     // Mark that memory is dirty. If committed again the memory might
     // need to be cleared explicitly.
     _dirty.set_range(start, end);
   } else {
-    uncommit_internal(page_start(start), page_end(end - 1));
+    uncommit_internal(start, end);
   }
 
   _committed.clear_range(start, end);
@@ -227,7 +256,8 @@
   out->cr();
   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
-  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT", " PTR_FORMAT"]", p2i(_low_boundary), p2i(_high_boundary));
+  out->print_cr(" - preferred page size: " SIZE_FORMAT, _page_size);
+  out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT ", " PTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary));
 }
 
 void G1PageBasedVirtualSpace::print() {
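The new commit_internal() above splits a commit request into whole preferred-size pages plus, when the request reaches a partially used last page, a small-page tail committed by commit_tail(). A standalone sketch of that split (illustrative only; the struct and function names are assumptions, not HotSpot code):

    // Illustrative model only -- not HotSpot code.
    #include <cstddef>
    #include <cstdio>

    struct CommitPlan {
      size_t full_pages;   // committed with the preferred (possibly large) page size
      size_t tail_bytes;   // committed with the small OS page size
    };

    // start_page/end_page are indices into the page bitmap; used_size is the number
    // of bytes actually used in the reservation (it need not be page aligned).
    static CommitPlan plan_commit(size_t start_page, size_t end_page,
                                  size_t page_count, size_t page_size, size_t used_size) {
      bool last_page_partial = (used_size % page_size) != 0;
      bool need_tail = (end_page == page_count) && last_page_partial;
      size_t pages = end_page - start_page;
      if (need_tail) {
        pages--;                      // the partial page is handled separately
      }
      CommitPlan plan;
      plan.full_pages = pages;
      plan.tail_bytes = need_tail ? used_size % page_size : 0;
      return plan;
    }

    int main() {
      // Assumed numbers: 3 pages of 2 MB reserved, 5 MB actually used.
      CommitPlan p = plan_commit(0, 3, 3, 2 * 1024 * 1024, 5 * 1024 * 1024);
      std::printf("full_pages=%zu tail_bytes=%zu\n", p.full_pages, p.tail_bytes);
      return 0;
    }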
--- old/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	2015-03-18 13:56:55.905368353 +0100
+++ new/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp	2015-03-18 13:56:55.839366391 +0100
@@ -49,8 +49,8 @@
   char* _low_boundary;
   char* _high_boundary;
 
-  // The preferred commit/uncommit granularity in bytes.
-  size_t _commit_size;
+  // The preferred page size used for commit/uncommit in bytes.
+  size_t _page_size;
 
   // Bitmap used for verification of commit/uncommit operations.
   BitMap _committed;
@@ -68,40 +68,55 @@
   // Indicates whether the committed space should be executable.
   bool _executable;
 
-  // Commit the given memory range by using _commit_size pages as much as possible
-  // and the remainder with small sized pages. The start address must be _commit_size
+  // Commit the given memory range by using _page_size pages as much as possible
+  // and the remainder with small sized pages. The start address must be _page_size
   // aligned.
-  void commit_internal(char* start, char* end);
+  void commit_internal(size_t start_page, size_t end_page);
+  // Commit num_pages full pages of _page_size size starting at page start_page. All
+  // argument checking has been performed.
+  void commit_full_pages(size_t start_page, size_t num_pages);
+  // Commit the tail area.
+  void commit_tail();
+
   // Uncommit the given memory range.
-  void uncommit_internal(char* start, char* end);
+  void uncommit_internal(size_t start_page, size_t end_page);
+
+  // Pretouch the given memory range.
+  void pretouch_internal(size_t start_page, size_t end_page);
 
   // Returns the index of the page which contains the given address.
-  uintptr_t addr_to_page_index(char* addr) const;
+  size_t addr_to_page_index(char* addr) const;
   // Returns the address of the given page index.
-  char* page_start(uintptr_t index);
-  // Returns the address of the end of the page given the page index ranging
-  // from 0..size_in_pages-2. For the last page, return _high_boundary.
-  char* page_end(uintptr_t index);
+  char* page_start(size_t index);
+  // Is the given page index the last page?
+  bool is_last_page(size_t index) { return index == (_committed.size() - 1); }
+  // Is the given page index the first after last page?
+  bool is_after_last_page(size_t index);
+  // Is the last page only partially covered by this space?
+  bool is_last_page_partial() { return !is_ptr_aligned(_high_boundary, _page_size); }
+  // Returns the end address of the given page bounded by the reserved space.
+  char* bounded_end_addr(size_t end_page);
+
   // Returns true if the entire area is backed by committed memory.
-  bool is_area_committed(uintptr_t start, size_t size_in_pages) const;
+  bool is_area_committed(size_t start, size_t size_in_pages) const;
   // Returns true if the entire area is not backed by committed memory.
-  bool is_area_uncommitted(uintptr_t start, size_t size_in_pages) const;
+  bool is_area_uncommitted(size_t start, size_t size_in_pages) const;
+
+  void initialize_with_page_size(ReservedSpace rs, size_t used_size, size_t page_size);
 public:
 
   // Commit the given area of pages starting at start being size_in_pages large.
   // Returns true if the given area is zero filled upon completion.
-  bool commit(uintptr_t start, size_t size_in_pages);
+  bool commit(size_t start, size_t size_in_pages);
 
   // Uncommit the given area of pages starting at start being size_in_pages large.
-  void uncommit(uintptr_t start, size_t size_in_pages);
+  void uncommit(size_t start, size_t size_in_pages);
 
-  // Initialization
-  G1PageBasedVirtualSpace();
-  // Initialize the given reserved space with the given base address and actual size.
-  // Prefer to commit in commit_size chunks.
-  bool initialize_with_granularity(ReservedSpace rs, size_t actual_size, size_t commit_size);
+  // Initialize the given reserved space with the given base address and the size
+  // actually used.
+  // Prefer to commit in page_size chunks.
+  G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size, size_t page_size);
 
   // Destruction
   ~G1PageBasedVirtualSpace();
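The helpers declared above all reduce to simple page-index arithmetic over the reserved range, with bounded_end_addr() clamping a page end to the high boundary of the used part. A small self-contained model (illustrative only; toy 64-byte pages, not HotSpot code):

    // Illustrative model only -- not HotSpot code.
    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    struct PageSpace {
      char*  low;        // start of the reservation
      char*  high;       // end of the used part of the reservation
      size_t page_size;  // preferred page size
      size_t num_pages;  // number of pages tracked by the commit bitmap

      size_t addr_to_page_index(char* addr) const { return (size_t)(addr - low) / page_size; }
      char*  page_start(size_t index) const       { return low + index * page_size; }
      bool   is_last_page_partial() const         { return ((size_t)(high - low)) % page_size != 0; }
      // End address of the page range [.., end_page), never past the used high boundary.
      char*  bounded_end_addr(size_t end_page) const { return std::min(high, page_start(end_page)); }
    };

    int main() {
      char backing[5 * 64];                                      // toy reservation: 5 pages of 64 bytes
      PageSpace s = { backing, backing + 4 * 64 + 16, 64, 5 };   // only 4.25 pages used

      assert(s.addr_to_page_index(backing + 130) == 2);
      assert(s.is_last_page_partial());
      assert(s.bounded_end_addr(5) == s.high);                   // clamped to the used part
      assert(s.bounded_end_addr(2) == backing + 128);
      return 0;
    }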
--- old/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	2015-03-18 13:56:56.313380481 +0100
+++ new/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp	2015-03-18 13:56:56.237378222 +0100
@@ -31,18 +31,16 @@
 #include "utilities/bitMap.inline.hpp"
 
 G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
-                                             size_t actual_size,
-                                             size_t commit_granularity,
+                                             size_t used_size,
+                                             size_t page_size,
                                              size_t region_granularity,
                                              MemoryType type) :
-  _storage(),
-  _commit_granularity(commit_granularity),
+  _storage(rs, used_size, page_size),
   _region_granularity(region_granularity),
   _listener(NULL),
   _commit_map() {
-  guarantee(is_power_of_2(commit_granularity), "must be");
+  guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
-  _storage.initialize_with_granularity(rs, actual_size, commit_granularity);
 
   MemTracker::record_virtual_memory_type((address)rs.base(), type);
 }
@@ -57,25 +55,25 @@
  public:
   G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
                                       size_t actual_size,
-                                      size_t os_commit_granularity,
+                                      size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
-    G1RegionToSpaceMapper(rs, actual_size, os_commit_granularity, alloc_granularity, type),
-    _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) {
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+    _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
 
-    guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity");
+    guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
     _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
   }
 
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
-    bool zero_filled = _storage.commit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+  virtual void commit_regions(uint start_idx, size_t num_regions) {
+    bool zero_filled = _storage.commit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
     _commit_map.set_range(start_idx, start_idx + num_regions);
     fire_on_commit(start_idx, num_regions, zero_filled);
   }
 
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
-    _storage.uncommit(start_idx * _pages_per_region, num_regions * _pages_per_region);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+    _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
     _commit_map.clear_range(start_idx, start_idx + num_regions);
   }
 };
@@ -101,22 +99,22 @@
  public:
   G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                        size_t actual_size,
-                                       size_t os_commit_granularity,
+                                       size_t page_size,
                                        size_t alloc_granularity,
                                        size_t commit_factor,
                                        MemoryType type) :
-    G1RegionToSpaceMapper(rs, actual_size, os_commit_granularity, alloc_granularity, type),
-    _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() {
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, type),
+    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
 
-    guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
-    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), os_commit_granularity)), os_commit_granularity);
+    guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
+    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_size_up(rs.size(), page_size)), page_size);
     _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false);
   }
 
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions) {
-    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
-      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region "INTPTR_FORMAT" that is already committed", i));
-      uintptr_t idx = region_idx_to_page_idx(i);
+  virtual void commit_regions(uint start_idx, size_t num_regions) {
+    for (uint i = start_idx; i < start_idx + num_regions; i++) {
+      assert(!_commit_map.at(i), err_msg("Trying to commit storage at region %u that is already committed", i));
+      size_t idx = region_idx_to_page_idx(i);
       uint old_refcount = _refcounts.get_by_index(idx);
       bool zero_filled = false;
       if (old_refcount == 0) {
@@ -128,10 +126,10 @@
     }
   }
 
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions) {
-    for (uintptr_t i = start_idx; i < start_idx + num_regions; i++) {
-      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region "INTPTR_FORMAT" that is not committed", i));
-      uintptr_t idx = region_idx_to_page_idx(i);
+  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
+    for (uint i = start_idx; i < start_idx + num_regions; i++) {
+      assert(_commit_map.at(i), err_msg("Trying to uncommit storage at region %u that is not committed", i));
+      size_t idx = region_idx_to_page_idx(i);
       uint old_refcount = _refcounts.get_by_index(idx);
       assert(old_refcount > 0, "must be");
       if (old_refcount == 1) {
@@ -151,14 +149,14 @@
 
 G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                             size_t actual_size,
-                                                            size_t os_commit_granularity,
+                                                            size_t page_size,
                                                             size_t region_granularity,
                                                             size_t commit_factor,
                                                             MemoryType type) {
-  if (region_granularity >= (os_commit_granularity * commit_factor)) {
-    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, os_commit_granularity, region_granularity, commit_factor, type);
+  if (region_granularity >= (page_size * commit_factor)) {
+    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   } else {
-    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, os_commit_granularity, region_granularity, commit_factor, type);
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
   }
 }
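create_mapper() above picks one of the two mappers depending on whether a G1 region (scaled by the commit factor) covers at least one page: the larger-than-page mapper commits pages_per_region pages per region, while the smaller-than-page mapper reference-counts each page shared by regions_per_page regions and only commits or uncommits on 0<->1 transitions. A standalone sketch of that selection (illustrative only; assumed 1 MB regions and 2 MB pages, not HotSpot code):

    // Illustrative model only -- not HotSpot code.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page_size          = 2 * 1024 * 1024;
      const size_t region_granularity = 1 * 1024 * 1024;   // stand-in for the G1 region size
      const size_t commit_factor      = 1;

      if (region_granularity >= page_size * commit_factor) {
        // "Regions larger than commit size": each region commit maps to whole pages.
        std::printf("pages_per_region = %zu\n", region_granularity / (page_size * commit_factor));
      } else {
        // "Regions smaller than commit size": several regions share one page, so the
        // page has to be reference counted before it can be committed or uncommitted.
        std::printf("regions_per_page = %zu\n", (page_size * commit_factor) / region_granularity);
      }
      return 0;
    }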
--- old/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	2015-03-18 13:56:56.702392044 +0100
+++ new/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp	2015-03-18 13:56:56.635390053 +0100
@@ -46,12 +46,12 @@
 protected:
   // Backing storage.
   G1PageBasedVirtualSpace _storage;
-  size_t _commit_granularity;
+
   size_t _region_granularity;
   // Mapping management
   BitMap _commit_map;
 
-  G1RegionToSpaceMapper(ReservedSpace rs, size_t actual_size, size_t commit_granularity, size_t region_granularity, MemoryType type);
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, MemoryType type);
 
   void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 public:
@@ -70,20 +70,20 @@
     return _commit_map.at(idx);
   }
 
-  virtual void commit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
-  virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0;
+  virtual void commit_regions(uint start_idx, size_t num_regions = 1) = 0;
+  virtual void uncommit_regions(uint start_idx, size_t num_regions = 1) = 0;
 
   // Creates an appropriate G1RegionToSpaceMapper for the given parameters.
   // The actual space to be used within the given reservation is given by actual_size.
   // This is because some OSes need to round up the reservation size to guarantee
-  // alignment of os_commit_granularity.
+  // alignment of page_size.
   // The byte_translation_factor defines how many bytes in a region correspond to
  // a single byte in the data structure this mapper is for.
   // Eg. in the card table, this value corresponds to the size a single card
   // table entry corresponds to in the heap.
   static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs,
                                               size_t actual_size,
-                                              size_t os_commit_granularity,
+                                              size_t page_size,
                                               size_t region_granularity,
                                               size_t byte_translation_factor,
                                               MemoryType type);
--- old/src/share/vm/runtime/virtualspace.cpp	2015-03-18 13:56:57.108404112 +0100
+++ new/src/share/vm/runtime/virtualspace.cpp	2015-03-18 13:56:57.040402091 +0100
@@ -37,13 +37,21 @@
     _alignment(0), _special(false), _executable(false) {
 }
 
-ReservedSpace::ReservedSpace(size_t size) {
+ReservedSpace::ReservedSpace(size_t size, bool prefer_large_pages) {
   // Want to use large pages where possible and pad with small pages.
   size_t page_size = os::page_size_for_region_unaligned(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
-  // Don't force the alignment to be large page aligned,
-  // since that will waste memory.
-  size_t alignment = os::vm_allocation_granularity();
+  size_t alignment;
+  if (large_pages && prefer_large_pages) {
+    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+    // ReservedSpace initialization requires size to be aligned to the given
+    // alignment. Align the size up.
+    size = align_size_up(size, alignment);
+  } else {
+    // Don't force the alignment to be large page aligned,
+    // since that will waste memory.
+    alignment = os::vm_allocation_granularity();
+  }
   initialize(size, alignment, large_pages, NULL, false);
 }
--- old/src/share/vm/runtime/virtualspace.hpp	2015-03-18 13:56:57.485415319 +0100
+++ new/src/share/vm/runtime/virtualspace.hpp	2015-03-18 13:56:57.419413357 +0100
@@ -51,7 +51,12 @@
  public:
   // Constructor
   ReservedSpace();
-  ReservedSpace(size_t size);
+  // Initialize the reserved space with the given size. If prefer_large_pages is
+  // set and the given size warrants use of large pages, try to force them by
+  // passing an alignment restriction further down. This may waste some space
+  // if the given size is not aligned, as the reservation will be aligned up
+  // to large page alignment.
+  ReservedSpace(size_t size, bool prefer_large_pages = false);
   ReservedSpace(size_t size, size_t alignment, bool large,
                 char* requested_address = NULL);
   ReservedSpace(size_t size, size_t alignment, bool large, bool executable);
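As the comment above notes, forcing large pages rounds the reservation up to large-page alignment, which can waste space for unaligned sizes, while the default (prefer_large_pages = false) keeps the old small alignment. A standalone sketch quantifying that trade-off (illustrative only; the 2 MB / 64 KB constants are assumptions, not HotSpot code):

    // Illustrative model only -- not HotSpot code.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t align_up(size_t value, size_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    static void preview(size_t size, bool prefer_large_pages, bool size_warrants_large_pages) {
      const size_t granularity = 64 * 1024;        // assumed allocation granularity
      const size_t large_page  = 2 * 1024 * 1024;  // assumed large page size

      size_t alignment;
      if (prefer_large_pages && size_warrants_large_pages) {
        // Force large-page alignment; the size is aligned up to it.
        alignment = std::max(large_page, granularity);
      } else {
        // Don't force large-page alignment, since that would waste memory.
        alignment = granularity;
      }
      size_t reserved = align_up(size, alignment);
      std::printf("size=%zu prefer=%d -> alignment=%zu reserved=%zu waste=%zu\n",
                  size, (int)prefer_large_pages, alignment, reserved, reserved - size);
    }

    int main() {
      preview(5 * 1024 * 1024, false, true);  // default behaviour: no rounding, no waste
      preview(5 * 1024 * 1024, true,  true);  // prefer_large_pages: rounded up to 6 MB
      return 0;
    }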