
src/hotspot/share/memory/virtualspace.cpp

rev 52439 : [mq]: webrev.2_reserved_page_size

*** 34,47 ****
  // ReservedSpace
  
  // Dummy constructor
  ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
!     _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
  }
  
! ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
    bool has_preferred_page_size = preferred_page_size != 0;
    // Want to use large pages where possible and pad with small pages.
    size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
    bool large_pages = page_size != (size_t)os::vm_page_size();
    size_t alignment;
--- 34,47 ----
  // ReservedSpace
  
  // Dummy constructor
  ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
!     _alignment(0), _special(false), _fd_for_heap(-1), _actual_page_size(0), _executable(false) {
  }
  
! ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _actual_page_size(0) {
    bool has_preferred_page_size = preferred_page_size != 0;
    // Want to use large pages where possible and pad with small pages.
    size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
    bool large_pages = page_size != (size_t)os::vm_page_size();
    size_t alignment;
*** 58,79 ****
    initialize(size, alignment, large_pages, NULL, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              char* requested_address) : _fd_for_heap(-1) {
    initialize(size, alignment, large, requested_address, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              bool executable) : _fd_for_heap(-1) {
    initialize(size, alignment, large, NULL, executable);
  }
  
  ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
!                              bool special, bool executable) : _fd_for_heap(-1) {
    assert((size % os::vm_allocation_granularity()) == 0,
           "size not allocation aligned");
    _base = base;
    _size = size;
    _alignment = alignment;
--- 58,79 ----
    initialize(size, alignment, large_pages, NULL, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              char* requested_address) : _fd_for_heap(-1), _actual_page_size(0) {
    initialize(size, alignment, large, requested_address, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              bool executable) : _fd_for_heap(-1), _actual_page_size(0) {
    initialize(size, alignment, large, NULL, executable);
  }
  
  ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
!                              bool special, bool executable) : _fd_for_heap(-1), _actual_page_size(0) {
    assert((size % os::vm_allocation_granularity()) == 0,
           "size not allocation aligned");
    _base = base;
    _size = size;
    _alignment = alignment;
*** 115,124 **** --- 115,137 ----
      }
    }
    return true;
  }
  
+ // Should be called after _special is decided.
+ void ReservedSpace::update_actual_page_size(bool large_page) {
+   // There are two ways to manage large page memory in ReservedSpace.
+   // 1. The OS supports committing large page memory.
+   // 2. The OS doesn't support committing large page memory, so ReservedSpace manages it specially;
+   //    when such a reservation succeeds, '_special' is set.
+   if (large_page && (os::can_commit_large_page_memory() || _special)) {
+     _actual_page_size = os::large_page_size();
+   } else {
+     _actual_page_size = os::vm_page_size();
+   }
+ }
+ 
  void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                                 char* requested_address,
                                 bool executable) {
    const size_t granularity = os::vm_allocation_granularity();
    assert((size & (granularity - 1)) == 0,
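Reviewer note (not part of the patch): the selection made by update_actual_page_size() can be read in isolation. The standalone sketch below uses a hypothetical stand-in, pick_actual_page_size, with the os::can_commit_large_page_memory(), os::large_page_size() and os::vm_page_size() queries modeled as plain parameters. It also illustrates why the call must come after _special is decided: a reservation satisfied with pre-committed large pages still reports the large page size even when the OS cannot commit large pages on demand.

#include <cassert>
#include <cstddef>

// Hypothetical stand-in for ReservedSpace::update_actual_page_size():
// 'os_commits_large_pages' models os::can_commit_large_page_memory(),
// 'special' models the _special flag, and the two sizes model
// os::large_page_size() / os::vm_page_size().
static std::size_t pick_actual_page_size(bool large_page,
                                         bool os_commits_large_pages,
                                         bool special,
                                         std::size_t large_page_size,
                                         std::size_t small_page_size) {
  // Large pages are in effect either when the OS can commit them on demand,
  // or when the reservation itself was made with large pages (_special).
  if (large_page && (os_commits_large_pages || special)) {
    return large_page_size;
  }
  return small_page_size;
}

int main() {
  const std::size_t small = 4 * 1024;
  const std::size_t large = 2 * 1024 * 1024;
  // OS commits large pages on demand: report the large page size.
  assert(pick_actual_page_size(true, true, false, large, small) == large);
  // No OS support, but the space was reserved as large pages (_special).
  assert(pick_actual_page_size(true, false, true, large, small) == large);
  // Large pages requested but neither supported nor pre-reserved: fall back.
  assert(pick_actual_page_size(true, false, false, large, small) == small);
  // A small-page reservation always reports the base page size.
  assert(pick_actual_page_size(false, true, true, large, small) == small);
  return 0;
}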
*** 226,235 **** --- 239,250 ----
    _alignment = alignment;
    // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
    if (_fd_for_heap != -1) {
      _special = true;
    }
+ 
+   update_actual_page_size(large);
  }
  
  ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                          bool split, bool realloc) {
    assert(partition_size <= size(), "partition failed");
*** 288,297 **** --- 303,313 ----
      _size = 0;
      _noaccess_prefix = 0;
      _alignment = 0;
      _special = false;
      _executable = false;
+     _actual_page_size = 0;
    }
  }
  
  static size_t noaccess_prefix_size(size_t alignment) {
    return lcm(os::vm_page_size(), alignment);
*** 397,411 ****
  
    // Done
    _base = base;
    _size = size;
    _alignment = alignment;
- 
    // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
    if (_fd_for_heap != -1) {
      _special = true;
    }
  
    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      release();
--- 413,427 ----
  
    // Done
    _base = base;
    _size = size;
    _alignment = alignment;
    // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
    if (_fd_for_heap != -1) {
      _special = true;
    }
+   update_actual_page_size(large);
  
    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      release();