# HG changeset patch
# User ehelin
# Date 1418647178 -3600
#      Mon Dec 15 13:39:38 2014 +0100
# Node ID 26f94ec6d73801ea7ed25d8be7e8379da2112cce
# Parent  c73efd13c4e45a95a76b43c452dbe0c5b8755629
8066875: VirtualSpace does not use large pages (01)

diff --git a/src/share/vm/code/codeCache.cpp b/src/share/vm/code/codeCache.cpp
--- a/src/share/vm/code/codeCache.cpp
+++ b/src/share/vm/code/codeCache.cpp
@@ -228,18 +228,18 @@ void CodeCache::initialize_heaps() {
   add_heap(profiled_space, "CodeHeap 'profiled nmethods'", init_profiled_size, CodeBlobType::MethodProfiled);
   // Tier 1 and tier 4 (non-profiled) methods and native methods
   add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", init_non_profiled_size, CodeBlobType::MethodNonProfiled);
 }
 
 ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) {
   // Determine alignment
   const size_t page_size = os::can_execute_large_page_memory() ?
-      MIN2(os::page_size_for_region(InitialCodeCacheSize, 8),
-           os::page_size_for_region(size, 8)) :
+      MIN2(os::page_size_for_region_aligned(InitialCodeCacheSize, 8),
+           os::page_size_for_region_aligned(size, 8)) :
       os::vm_page_size();
   const size_t granularity = os::vm_allocation_granularity();
   const size_t r_align = MAX2(page_size, granularity);
   const size_t r_size = align_size_up(size, r_align);
   const size_t rs_align = page_size == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_size, granularity);
 
   ReservedCodeSpace rs(r_size, rs_align, rs_align > 0);
diff --git a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.cpp
@@ -56,19 +56,19 @@ void GenerationSizer::initialize_flags()
   if (InitialSurvivorRatio < 3) {
     FLAG_SET_ERGO(uintx, InitialSurvivorRatio, 3);
   }
 }
 
 void GenerationSizer::initialize_size_info() {
   trace_gen_sizes("ps heap raw");
-  const size_t max_page_sz = os::page_size_for_region(_max_heap_byte_size, 8);
+  const size_t max_page_sz = os::page_size_for_region_aligned(_max_heap_byte_size, 8);
   const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
-  const size_t min_page_sz = os::page_size_for_region(_min_heap_byte_size, min_pages);
+  const size_t min_page_sz = os::page_size_for_region_aligned(_min_heap_byte_size, min_pages);
   const size_t page_sz = MIN2(max_page_sz, min_page_sz);
 
   // Can a page size be something else than a power of two?
   assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
   size_t new_alignment = round_to(page_sz, _gen_alignment);
   if (new_alignment != _gen_alignment) {
     _gen_alignment = new_alignment;
     _space_alignment = new_alignment;
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.cpp
@@ -36,17 +36,17 @@ ParMarkBitMap::initialize(MemRegion cove
 {
   const idx_t bits = bits_required(covered_region);
   // The bits will be divided evenly between two bitmaps; each of them should be
   // an integral number of words.
   assert(bits % (BitsPerWord * 2) == 0, "region size unaligned");
 
   const size_t words = bits / BitsPerWord;
   const size_t raw_bytes = words * sizeof(idx_t);
-  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
   ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
   os::trace_page_sizes("par bitmap", raw_bytes, raw_bytes, page_sz,
                        rs.base(), rs.size());
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -398,17 +398,17 @@ bool ParallelCompactData::initialize(Mem
   bool result = initialize_region_data(region_size) && initialize_block_data();
   return result;
 }
 
 PSVirtualSpace*
 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 {
   const size_t raw_bytes = count * element_size;
-  const size_t page_sz = os::page_size_for_region(raw_bytes, 10);
+  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
   const size_t granularity = os::vm_allocation_granularity();
   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 
   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
     MAX2(page_sz, granularity);
   ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
   os::trace_page_sizes("par compact", raw_bytes, raw_bytes, page_sz, rs.base(),
                        rs.size());
diff --git a/src/share/vm/memory/heap.cpp b/src/share/vm/memory/heap.cpp
--- a/src/share/vm/memory/heap.cpp
+++ b/src/share/vm/memory/heap.cpp
@@ -99,18 +99,18 @@ bool CodeHeap::reserve(ReservedSpace rs,
   _segment_size      = segment_size;
   _log2_segment_size = exact_log2(segment_size);
 
   // Reserve and initialize space for _memory.
   size_t page_size = os::vm_page_size();
   if (os::can_execute_large_page_memory()) {
     const size_t min_pages = 8;
-    page_size = MIN2(os::page_size_for_region(committed_size, min_pages),
-                     os::page_size_for_region(rs.size(), min_pages));
+    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
+                     os::page_size_for_region_aligned(rs.size(), min_pages));
   }
 
   const size_t granularity = os::vm_allocation_granularity();
   const size_t c_size = align_size_up(committed_size, page_size);
 
   os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                        rs.base(), rs.size());
   if (!_memory.initialize(rs, c_size)) {
diff --git a/src/share/vm/runtime/os.cpp b/src/share/vm/runtime/os.cpp
--- a/src/share/vm/runtime/os.cpp
+++ b/src/share/vm/runtime/os.cpp
@@ -1394,42 +1394,43 @@ bool os::stack_shadow_pages_available(Th
     Interpreter::size_top_interpreter_activation(method()) * wordSize;
   int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages)
                       * vm_page_size()) + framesize_in_bytes;
   // The very lower end of the stack
   address stack_limit = thread->stack_base() - thread->stack_size();
   return (sp > (stack_limit + reserved_area));
 }
 
-size_t os::page_size_for_region(size_t region_size, size_t min_pages) {
+size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
   assert(min_pages > 0, "sanity");
   if (UseLargePages) {
     const size_t max_page_size = region_size / min_pages;
 
     for (size_t i = 0; _page_sizes[i] != 0; ++i) {
       const size_t page_size = _page_sizes[i];
-      if (page_size <= max_page_size && is_size_aligned(region_size, page_size)) {
-        return page_size;
+      if (page_size <= max_page_size) {
+        if (!must_be_aligned) {
+          return page_size;
+        }
+        if (is_size_aligned(region_size, page_size)) {
+          return page_size;
+        }
       }
     }
   }
 
   return vm_page_size();
 }
 
-size_t os::largest_page_size_less_than(size_t sz) {
-  if (UseLargePages) {
-    // The page sizes are sorted descendingly.
-    for (size_t i = 0; _page_sizes[i] != 0; ++i) {
-      if (_page_sizes[i] <= sz) {
-        return _page_sizes[i];
-      }
-    }
-  }
-  return vm_page_size();
+size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
+  return page_size_for_region(region_size, min_pages, true);
+}
+
+size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
+  return page_size_for_region(region_size, min_pages, false);
 }
 
 #ifndef PRODUCT
 void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
 {
   if (TracePageSizes) {
     tty->print("%s: ", str);
     for (int i = 0; i < count; ++i) {
@@ -1665,86 +1666,86 @@ os::SuspendResume::State os::SuspendResu
 class TestOS : AllStatic {
   static size_t small_page_size() {
     return os::vm_page_size();
   }
 
   static size_t large_page_size() {
     const size_t large_page_size_example = 4 * M;
-    return os::page_size_for_region(large_page_size_example, 1);
+    return os::page_size_for_region_aligned(large_page_size_example, 1);
   }
 
-  static void test_page_size_for_region() {
+  static void test_page_size_for_region_aligned() {
     if (UseLargePages) {
       const size_t small_page = small_page_size();
       const size_t large_page = large_page_size();
 
       if (large_page > small_page) {
         size_t num_small_pages_in_large = large_page / small_page;
-        size_t page = os::page_size_for_region(large_page, num_small_pages_in_large);
+        size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);
 
         assert_eq(page, small_page);
       }
     }
   }
 
   static void test_page_size_for_region_alignment() {
     if (UseLargePages) {
       const size_t small_page = small_page_size();
       const size_t large_page = large_page_size();
       if (large_page > small_page) {
         const size_t unaligned_region = large_page + 17;
-        size_t page = os::page_size_for_region(unaligned_region, 1);
+        size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
         assert_eq(page, small_page);
 
         const size_t num_pages = 5;
         const size_t aligned_region = large_page * num_pages;
-        page = os::page_size_for_region(aligned_region, num_pages);
+        page = os::page_size_for_region_aligned(aligned_region, num_pages);
         assert_eq(page, large_page);
       }
     }
   }
 
-  static void test_largest_page_size_less_than() {
+  static void test_page_size_for_region_unaligned() {
     if (UseLargePages) {
       // Given exact page size, should return that page size
       for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
         size_t expected = os::_page_sizes[i];
-        size_t actual = os::largest_page_size_less_than(expected);
+        size_t actual = os::page_size_for_region_unaligned(expected, 1);
         assert_eq(expected, actual);
       }
 
       // Given slightly larger size than a page size, return the page size
       for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
         size_t expected = os::_page_sizes[i];
-        size_t actual = os::largest_page_size_less_than(expected + 17);
+        size_t actual = os::page_size_for_region_unaligned(expected + 17, 1);
         assert_eq(expected, actual);
       }
 
       // Given a slightly smaller size than a page size,
       // return the next smaller page size
       if (os::_page_sizes[1] > os::_page_sizes[0]) {
         size_t expected = os::_page_sizes[0];
-        size_t actual = os::largest_page_size_less_than(os::_page_sizes[1] - 17);
+        size_t actual = os::page_size_for_region_unaligned(os::_page_sizes[1] - 17, 1);
         assert_eq(actual, expected);
       }
 
       // Return small page size for values less than a small page
       size_t small_page = small_page_size();
-      size_t actual = os::largest_page_size_less_than(small_page - 17);
+      size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
       assert_eq(small_page, actual);
     }
   }
 
  public:
   static void run_tests() {
-    test_page_size_for_region();
+    test_page_size_for_region_aligned();
     test_page_size_for_region_alignment();
-    test_largest_page_size_less_than();
+    test_page_size_for_region_unaligned();
   }
 };
 
 void TestOS_test() {
   TestOS::run_tests();
 }
 
 #endif // PRODUCT
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
--- a/src/share/vm/runtime/os.hpp
+++ b/src/share/vm/runtime/os.hpp
@@ -143,16 +143,17 @@ class os: AllStatic {
                                 bool allow_exec = false);
   static char*  pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                                 char *addr, size_t bytes, bool read_only,
                                 bool allow_exec);
   static bool   pd_unmap_memory(char *addr, size_t bytes);
   static void   pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
   static void   pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
 
+  static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
 
  public:
   static void init(void);                      // Called before command line parsing
   static void init_before_ergo(void);          // Called after command line parsing
                                                // before VM ergonomics processing.
   static jint init_2(void);                    // Called after command line parsing
                                                // and VM ergonomics processing
   static void init_globals(void) {             // Called from init_globals() in init.cpp
@@ -262,28 +263,30 @@ class os: AllStatic {
   // OS interface to Virtual Memory
 
   // Return the default page size.
   static int    vm_page_size();
 
   // Returns the page size to use for a region of memory.
   // region_size / min_pages will always be greater than or equal to the
-  // returned value.
-  static size_t page_size_for_region(size_t region_size, size_t min_pages);
+  // returned value. The returned value will divide region_size.
+  static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
+
+  // Returns the page size to use for a region of memory.
+  // region_size / min_pages will always be greater than or equal to the
+  // returned value. The returned value might not divide region_size.
+  static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
 
   // Return the largest page size that can be used
   static size_t max_page_size() {
     // The _page_sizes array is sorted in descending order.
     return _page_sizes[0];
   }
 
-  // Returns the largest page size smaller than the argument.
-  static size_t largest_page_size_less_than(size_t sz);
-
   // Methods for tracing page sizes returned by the above method; enabled by
   // TracePageSizes. The region_{min,max}_size parameters should be the values
   // passed to page_size_for_region() and page_size should be the result of that
   // call. The (optional) base and size parameters should come from the
   // ReservedSpace base() and size() methods.
   static void trace_page_sizes(const char* str, const size_t* page_sizes,
                                int count) PRODUCT_RETURN;
   static void trace_page_sizes(const char* str, const size_t region_min_size,
diff --git a/src/share/vm/runtime/virtualspace.cpp b/src/share/vm/runtime/virtualspace.cpp
--- a/src/share/vm/runtime/virtualspace.cpp
+++ b/src/share/vm/runtime/virtualspace.cpp
@@ -33,17 +33,17 @@ PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 // ReservedSpace
 
 // Dummy constructor
 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
     _alignment(0), _special(false), _executable(false) {
 }
 
 ReservedSpace::ReservedSpace(size_t size) {
-  size_t page_size = os::page_size_for_region(size, 1);
+  size_t page_size = os::page_size_for_region_aligned(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
   // Don't force the alignment to be large page aligned,
   // since that will waste memory.
   size_t alignment = os::vm_allocation_granularity();
   initialize(size, alignment, large_pages, NULL, 0, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
@@ -352,17 +352,17 @@ VirtualSpace::VirtualSpace() {
   _middle_alignment = 0;
   _upper_alignment = 0;
   _special = false;
   _executable = false;
 }
 
 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
-  const size_t max_commit_granularity = os::largest_page_size_less_than(rs.size());
+  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 }
 
 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
   if(!rs.is_reserved()) return false;  // allocation failed.
   assert(_low_boundary == NULL, "VirtualSpace already initialized");
   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 
@@ -987,17 +987,17 @@ class TestVirtualSpace : AllStatic {
     switch(mode) {
     default:
     case Default:
     case Reserve:
       return vs.initialize(rs, 0);
     case Disable:
       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
     case Commit:
-      return vs.initialize_with_granularity(rs, 0, os::largest_page_size_less_than(rs.size()));
+      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
     }
   }
 
  public:
   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                         TestLargePages mode = Default) {
     size_t granularity = os::vm_allocation_granularity();
     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
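
For readers following the os.cpp hunks above, the standalone C++ sketch below models how the reworked page_size_for_region() and its _aligned/_unaligned wrappers select a page size. It is a simplified illustration rather than HotSpot code: the page-size table, the vm_page_size() stub, and the model namespace are hypothetical stand-ins for os::_page_sizes and the platform query, and the UseLargePages gate of the real code is omitted.

#include <cassert>
#include <cstddef>
#include <iostream>

namespace model {

// Stand-in for os::_page_sizes: candidate page sizes in descending order,
// zero-terminated (here 1 GiB, 2 MiB, 4 KiB; the real array is filled in
// by the platform layer).
const size_t page_sizes[] = { 1024u * 1024u * 1024u, 2u * 1024u * 1024u, 4u * 1024u, 0 };

// Stand-in for os::vm_page_size(): the default small page size.
size_t vm_page_size() { return 4u * 1024u; }

bool is_size_aligned(size_t size, size_t alignment) {
  return size % alignment == 0;
}

// Mirrors the reworked os::page_size_for_region(): pick the largest page
// size that still yields at least min_pages pages in the region; when
// must_be_aligned is true, additionally require that the page size divides
// region_size evenly. (The UseLargePages check is omitted in this model.)
size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
  assert(min_pages > 0 && "sanity");
  const size_t max_page_size = region_size / min_pages;
  for (size_t i = 0; page_sizes[i] != 0; ++i) {
    const size_t page_size = page_sizes[i];
    if (page_size <= max_page_size) {
      if (!must_be_aligned || is_size_aligned(region_size, page_size)) {
        return page_size;
      }
    }
  }
  return vm_page_size();
}

} // namespace model

int main() {
  const size_t large = 2u * 1024u * 1024u;
  // Aligned variant: an unaligned region (2 MiB + 17 bytes) falls back to
  // small pages, because no large page size divides it evenly.
  assert(model::page_size_for_region(large + 17, 1, true) == model::vm_page_size());
  // Unaligned variant: the same region can still be backed by large pages,
  // which is the behavior VirtualSpace::initialize() now relies on when it
  // chooses its maximum commit granularity.
  assert(model::page_size_for_region(large + 17, 1, false) == large);
  std::cout << "page size selection model: ok" << std::endl;
  return 0;
}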