--- old/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp 2015-02-03 11:17:52.043666200 +0100 +++ new/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp 2015-02-03 11:17:51.973664179 +0100 @@ -72,10 +72,10 @@ _commit_size = commit_size; assert(_committed.size() == 0, "virtual space initialized more than once"); - BitMap::idx_t size_in_pages = align_size_up(rs.size(), commit_size) / commit_size; - _committed.resize(size_in_pages, /* in_resource_area */ false); + BitMap::idx_t size_in_commit_pages = round_to(rs.size(), commit_size) / commit_size; + _committed.resize(size_in_commit_pages, /* in_resource_area */ false); if (_special) { - _dirty.resize(size_in_pages, /* in_resource_area */ false); + _dirty.resize(size_in_commit_pages, /* in_resource_area */ false); } return true; @@ -140,7 +140,7 @@ return _high_boundary; } -void G1PageBasedVirtualSpace::commit_int(char* start, char* end) { +void G1PageBasedVirtualSpace::commit_internal(char* start, char* end) { guarantee(start >= _low_boundary && start < _high_boundary, err_msg("Start address " PTR_FORMAT" is outside of reserved space.", p2i(start))); guarantee(is_ptr_aligned(start, _commit_size), @@ -187,7 +187,7 @@ _dirty.clear_range(start, end); } } else { - commit_int(page_start(start), page_end(end - 1)); + commit_internal(page_start(start), page_end(end - 1)); } _committed.set_range(start, end); @@ -197,7 +197,7 @@ return zero_filled; } -void G1PageBasedVirtualSpace::uncommit_int(char* start, char* end) { +void G1PageBasedVirtualSpace::uncommit_internal(char* start, char* end) { os::uncommit_memory(start, pointer_delta(end, start, sizeof(char))); } @@ -210,7 +210,7 @@ // need to be cleared explicitly. 
_dirty.set_range(start, end); } else { - uncommit_int(page_start(start), page_end(end - 1)); + uncommit_internal(page_start(start), page_end(end - 1)); } _committed.clear_range(start, end); --- old/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp 2015-02-03 11:17:52.425677231 +0100 +++ new/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp 2015-02-03 11:17:52.355675209 +0100 @@ -34,11 +34,12 @@ // granularity. // (De-)Allocation requests are always OS page aligned by passing a page index // and multiples of pages. -// We only support reservations which base address is aligned to a given commit -// size. The length of the area managed need not commit size aligned (but OS default -// page size aligned) because some OSes cannot provide a an os_commit_size aligned -// reservation without also being size-aligned. Any tail area is committed using OS -// small pages. +// For systems that only commit memory in a given size (always greater than +// page size) the base address is required to be aligned to that commit size. +// The actual size requested need not be aligned to the commit size, but the size +// of the reservation passed may be rounded up to the commit size. Any fragment +// (less than the commit size) of the actual size at the tail of the request will +// be committed using OS small pages. // The implementation gives an error when trying to commit or uncommit pages that // have already been committed or uncommitted. class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC { @@ -70,15 +71,16 @@ // Commit the given memory range by using _commit_size pages as much as possible // and the remainder with small sized pages. The start address must be _commit_size // aligned. - void commit_int(char* start, char* end); + void commit_internal(char* start, char* end); // Uncommit the given memory range. 
- void uncommit_int(char* start, char* end); + void uncommit_internal(char* start, char* end); // Returns the index of the page which contains the given address. uintptr_t addr_to_page_index(char* addr) const; // Returns the address of the given page index. char* page_start(uintptr_t index); - // Returns the address of the given page index ranging from 0..size_in_pages-1. + // Returns the address of the end of the page given the page index ranging + // from 0..size_in_pages-2. For the last page, return _high_boundary. char* page_end(uintptr_t index); // Returns true if the entire area is backed by committed memory.