--- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2015-02-03 11:18:15.200334867 +0100 +++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2015-02-03 11:18:15.126332730 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,7 +115,7 @@ } size_t CMBitMap::compute_size(size_t heap_size) { - return heap_size / mark_distance(); + return ReservedSpace::allocation_align_size_up(heap_size / mark_distance()); } size_t CMBitMap::mark_distance() { --- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2015-02-03 11:18:15.621347024 +0100 +++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp 2015-02-03 11:18:15.544344801 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1832,6 +1832,33 @@ guarantee(_task_queues != NULL, "task_queues allocation failure."); } +G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description, + size_t size, + size_t translation_factor) { + // Determine the preferred page size for the auxiliary data structures. We always + // prefer large pages if the given size allows it for performance reasons. + size_t const commit_size = os::page_size_for_region_unaligned(size, 1); + // The base address reserved space must be aligned to that page. Otherwise we + // would need to split pages (or it would be completely impossible) when + // uncommitting memory within the heap. + // Size need *not* be aligned to above calculated commit size. 
+ size_t const alignment = MAX2(commit_size, (size_t)os::vm_allocation_granularity()); + bool const use_large_pages = commit_size != (size_t)os::vm_page_size() ? UseLargePages : false; + ReservedSpace rs(align_size_up(size, alignment), alignment, use_large_pages); + G1RegionToSpaceMapper* result = + G1RegionToSpaceMapper::create_mapper(rs, + size, + commit_size, + HeapRegion::GrainBytes, + translation_factor, + mtGC); + if (TracePageSizes) { + gclog_or_tty->print_cr("G1 '%s': pg_sz=" SIZE_FORMAT" base=" PTR_FORMAT" size=" SIZE_FORMAT" alignment=" SIZE_FORMAT" reqsize=" SIZE_FORMAT, + description, commit_size, p2i(rs.base()), rs.size(), rs.alignment(), size); + } + return result; +} + jint G1CollectedHeap::initialize() { CollectedHeap::pre_initialize(); os::enable_vtime(); @@ -1899,57 +1926,35 @@ ReservedSpace g1_rs = heap_rs.first_part(max_byte_size); G1RegionToSpaceMapper* heap_storage = G1RegionToSpaceMapper::create_mapper(g1_rs, + g1_rs.size(), UseLargePages ? os::large_page_size() : os::vm_page_size(), HeapRegion::GrainBytes, 1, mtJavaHeap); heap_storage->set_mapping_changed_listener(&_listener); - // Reserve space for the block offset table. We do not support automatic uncommit - // for the card table at this time. BOT only. - ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); + // Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps. 
G1RegionToSpaceMapper* bot_storage = - G1RegionToSpaceMapper::create_mapper(bot_rs, - os::vm_page_size(), - HeapRegion::GrainBytes, - G1BlockOffsetSharedArray::N_bytes, - mtGC); + create_aux_memory_mapper("Block offset table", + G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize), + G1BlockOffsetSharedArray::N_bytes); ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize)); G1RegionToSpaceMapper* cardtable_storage = - G1RegionToSpaceMapper::create_mapper(cardtable_rs, - os::vm_page_size(), - HeapRegion::GrainBytes, - G1BlockOffsetSharedArray::N_bytes, - mtGC); + create_aux_memory_mapper("Card table", + G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize), + G1BlockOffsetSharedArray::N_bytes); - // Reserve space for the card counts table. - ReservedSpace card_counts_rs(G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize)); G1RegionToSpaceMapper* card_counts_storage = - G1RegionToSpaceMapper::create_mapper(card_counts_rs, - os::vm_page_size(), - HeapRegion::GrainBytes, - G1BlockOffsetSharedArray::N_bytes, - mtGC); + create_aux_memory_mapper("Card counts table", + G1BlockOffsetSharedArray::compute_size(g1_rs.size() / HeapWordSize), + G1BlockOffsetSharedArray::N_bytes); - // Reserve space for prev and next bitmap. 
size_t bitmap_size = CMBitMap::compute_size(g1_rs.size()); - - ReservedSpace prev_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); G1RegionToSpaceMapper* prev_bitmap_storage = - G1RegionToSpaceMapper::create_mapper(prev_bitmap_rs, - os::vm_page_size(), - HeapRegion::GrainBytes, - CMBitMap::mark_distance(), - mtGC); - - ReservedSpace next_bitmap_rs(ReservedSpace::allocation_align_size_up(bitmap_size)); + create_aux_memory_mapper("Prev Bitmap", bitmap_size, CMBitMap::mark_distance()); G1RegionToSpaceMapper* next_bitmap_storage = - G1RegionToSpaceMapper::create_mapper(next_bitmap_rs, - os::vm_page_size(), - HeapRegion::GrainBytes, - CMBitMap::mark_distance(), - mtGC); + create_aux_memory_mapper("Next Bitmap", bitmap_size, CMBitMap::mark_distance()); _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); g1_barrier_set()->initialize(cardtable_storage); --- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2015-02-03 11:18:16.073360076 +0100 +++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp 2015-02-03 11:18:16.002358026 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -349,6 +349,12 @@ // heap after a compaction. void print_hrm_post_compaction(); + // Create a memory mapper for auxiliary data structures of the given size and + // translation factor. 
+ static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description, + size_t size, + size_t translation_factor); + double verify(bool guard, const char* msg); void verify_before_gc(); void verify_after_gc(); --- old/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp 2015-02-03 11:18:16.465371396 +0100 +++ new/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp 2015-02-03 11:18:16.397369432 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,36 +45,42 @@ #include "utilities/bitMap.inline.hpp" G1PageBasedVirtualSpace::G1PageBasedVirtualSpace() : _low_boundary(NULL), - _high_boundary(NULL), _committed(), _page_size(0), _special(false), + _high_boundary(NULL), _committed(), _commit_size(0), _special(false), _dirty(), _executable(false) { } -bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t page_size) { +bool G1PageBasedVirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t actual_size, size_t commit_size) { if (!rs.is_reserved()) { return false; // Allocation failed. 
} assert(_low_boundary == NULL, "VirtualSpace already initialized"); - assert(page_size > 0, "Granularity must be non-zero."); + assert(commit_size > 0, "Granularity must be non-zero."); + + guarantee(is_ptr_aligned(rs.base(), commit_size), + err_msg("Reserved space base " PTR_FORMAT" is not aligned to requested page size " SIZE_FORMAT, p2i(rs.base()), commit_size)); + guarantee(is_size_aligned(actual_size, os::vm_page_size()), + err_msg("Given actual reserved space size needs to be OS page size aligned (%d bytes) but is " SIZE_FORMAT, os::vm_page_size(), actual_size)); + guarantee(actual_size <= rs.size(), + err_msg("Actual size of reserved space " SIZE_FORMAT" bytes is larger than reservation at " SIZE_FORMAT" bytes", actual_size, rs.size())); _low_boundary = rs.base(); - _high_boundary = _low_boundary + rs.size(); + _high_boundary = _low_boundary + actual_size; _special = rs.special(); _executable = rs.executable(); - _page_size = page_size; + _commit_size = commit_size; assert(_committed.size() == 0, "virtual space initialized more than once"); - uintx size_in_bits = rs.size() / page_size; - _committed.resize(size_in_bits, /* in_resource_area */ false); + BitMap::idx_t size_in_commit_pages = round_to(rs.size(), commit_size) / commit_size; + _committed.resize(size_in_commit_pages, /* in_resource_area */ false); if (_special) { - _dirty.resize(size_in_bits, /* in_resource_area */ false); + _dirty.resize(size_in_commit_pages, /* in_resource_area */ false); } return true; } - G1PageBasedVirtualSpace::~G1PageBasedVirtualSpace() { release(); } @@ -86,13 +92,18 @@ _high_boundary = NULL; _special = false; _executable = false; - _page_size = 0; + _commit_size = 0; _committed.resize(0, false); _dirty.resize(0, false); } size_t G1PageBasedVirtualSpace::committed_size() const { - return _committed.count_one_bits() * _page_size; + size_t result = _committed.count_one_bits() * _commit_size; + // The last page might not be full. 
+ if (_committed.at(_committed.size()-1)) { + result -= pointer_delta((char*)align_ptr_up(_high_boundary, _commit_size), _high_boundary, sizeof(char)); + } + return result; } size_t G1PageBasedVirtualSpace::reserved_size() const { @@ -104,7 +115,7 @@ } uintptr_t G1PageBasedVirtualSpace::addr_to_page_index(char* addr) const { - return (addr - _low_boundary) / _page_size; + return (addr - _low_boundary) / _commit_size; } bool G1PageBasedVirtualSpace::is_area_committed(uintptr_t start, size_t size_in_pages) const { @@ -118,11 +129,48 @@ } char* G1PageBasedVirtualSpace::page_start(uintptr_t index) { - return _low_boundary + index * _page_size; + return _low_boundary + index * _commit_size; } -size_t G1PageBasedVirtualSpace::byte_size_for_pages(size_t num) { - return num * _page_size; +char* G1PageBasedVirtualSpace::page_end(uintptr_t index) { + guarantee(index < _committed.size(), "invariant"); + if (index != (_committed.size() - 1)) { + return page_start(index + 1); + } + return _high_boundary; +} + +void G1PageBasedVirtualSpace::commit_internal(char* start, char* end) { + guarantee(start >= _low_boundary && start < _high_boundary, + err_msg("Start address " PTR_FORMAT" is outside of reserved space.", p2i(start))); + guarantee(is_ptr_aligned(start, _commit_size), + err_msg("Start address should be aligned to commit size " SIZE_FORMAT" but got " PTR_FORMAT".", + _commit_size, p2i(start))); + + guarantee(end >= _low_boundary && end <= _high_boundary, + err_msg("End address " PTR_FORMAT" is outside of reserved space.", p2i(end))); + bool is_high_aligned_to_commit_size = is_ptr_aligned(_high_boundary, _commit_size); + guarantee(is_ptr_aligned(end, is_high_aligned_to_commit_size ? _commit_size : os::vm_page_size()), + err_msg("End address should be aligned to page size " SIZE_FORMAT" but got " PTR_FORMAT".", + is_high_aligned_to_commit_size ? _commit_size : os::vm_page_size(), + p2i(end))); + // First try to commit in commit_size chunks. 
+ char* const aligned_end_address = (char*)align_ptr_down(end, _commit_size); + size_t const size = pointer_delta(aligned_end_address, start, sizeof(char)); + if (size != 0) { + os::commit_memory_or_exit(start, size, _commit_size, _executable, + err_msg("Failed to commit area from " PTR_FORMAT" to " PTR_FORMAT" of length " SIZE_FORMAT".", + p2i(start), p2i(aligned_end_address), size)); + } + // Finally, commit any remaining tail. + if (end != aligned_end_address) { + size_t const tail_size = pointer_delta(end, aligned_end_address, sizeof(char)); + guarantee(tail_size < _commit_size, + err_msg("Remaining size " SIZE_FORMAT " must be smaller than commit size of " SIZE_FORMAT, tail_size, _commit_size)); + os::commit_memory_or_exit(aligned_end_address, tail_size, _executable, + err_msg("Failed to commit remainder pages from " PTR_FORMAT" to " PTR_FORMAT" of length "SIZE_FORMAT".", + p2i(aligned_end_address), p2i(end), tail_size)); + } } bool G1PageBasedVirtualSpace::commit(uintptr_t start, size_t size_in_pages) { @@ -139,29 +187,33 @@ _dirty.clear_range(start, end); } } else { - os::commit_memory_or_exit(page_start(start), byte_size_for_pages(size_in_pages), _executable, - err_msg("Failed to commit pages from "SIZE_FORMAT" of length "SIZE_FORMAT, start, size_in_pages)); + commit_internal(page_start(start), page_end(end - 1)); } _committed.set_range(start, end); if (AlwaysPreTouch) { - os::pretouch_memory(page_start(start), page_start(end)); + os::pretouch_memory(page_start(start), page_end(end - 1)); } return zero_filled; } +void G1PageBasedVirtualSpace::uncommit_internal(char* start, char* end) { + os::uncommit_memory(start, pointer_delta(end, start, sizeof(char))); +} + void G1PageBasedVirtualSpace::uncommit(uintptr_t start, size_t size_in_pages) { guarantee(is_area_committed(start, size_in_pages), "checking"); + uintptr_t end = start + size_in_pages; if (_special) { // Mark that memory is dirty. If committed again the memory might // need to be cleared explicitly. 
- _dirty.set_range(start, start + size_in_pages); + _dirty.set_range(start, end); } else { - os::uncommit_memory(page_start(start), byte_size_for_pages(size_in_pages)); + uncommit_internal(page_start(start), page_end(end - 1)); } - _committed.clear_range(start, start + size_in_pages); + _committed.clear_range(start, end); } bool G1PageBasedVirtualSpace::contains(const void* p) const { @@ -175,7 +227,7 @@ out->cr(); out->print_cr(" - committed: " SIZE_FORMAT, committed_size()); out->print_cr(" - reserved: " SIZE_FORMAT, reserved_size()); - out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]", p2i(_low_boundary), p2i(_high_boundary)); + out->print_cr(" - [low_b, high_b]: [" PTR_FORMAT", " PTR_FORMAT"]", p2i(_low_boundary), p2i(_high_boundary)); } void G1PageBasedVirtualSpace::print() { --- old/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp 2015-02-03 11:18:16.849382485 +0100 +++ new/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.hpp 2015-02-03 11:18:16.780380492 +0100 @@ -34,6 +34,12 @@ // granularity. // (De-)Allocation requests are always OS page aligned by passing a page index // and multiples of pages. +// For systems that only commits of memory in a given size (always greater than +// page size) the base address is required to be aligned to that commit size. +// The actual size requested need not be aligned to the commit size, but the size +// of the reservation passed may be rounded up to the commit size. Any fragment +// (less than the commit size) of the actual size at the tail of the request will +// be committed using OS small pages. // The implementation gives an error when trying to commit or uncommit pages that // have already been committed or uncommitted. class G1PageBasedVirtualSpace VALUE_OBJ_CLASS_SPEC { @@ -43,8 +49,8 @@ char* _low_boundary; char* _high_boundary; - // The commit/uncommit granularity in bytes. - size_t _page_size; + // The preferred commit/uncommit granularity in bytes. 
+ size_t _commit_size; // Bitmap used for verification of commit/uncommit operations. BitMap _committed; @@ -62,12 +68,20 @@ // Indicates whether the committed space should be executable. bool _executable; + // Commit the given memory range by using _commit_size pages as much as possible + // and the remainder with small sized pages. The start address must be _commit_size + // aligned. + void commit_internal(char* start, char* end); + // Uncommit the given memory range. + void uncommit_internal(char* start, char* end); + // Returns the index of the page which contains the given address. uintptr_t addr_to_page_index(char* addr) const; // Returns the address of the given page index. char* page_start(uintptr_t index); - // Returns the byte size of the given number of pages. - size_t byte_size_for_pages(size_t num); + // Returns the address of the end of the page given the page index ranging + // from 0..size_in_pages-2. For the last page, return _high_boundary. + char* page_end(uintptr_t index); // Returns true if the entire area is backed by committed memory. bool is_area_committed(uintptr_t start, size_t size_in_pages) const; @@ -85,7 +99,9 @@ // Initialization G1PageBasedVirtualSpace(); - bool initialize_with_granularity(ReservedSpace rs, size_t page_size); + // Initialize the given reserved space with the given base address and actual size. + // Prefer to commit in commit_size chunks. + bool initialize_with_granularity(ReservedSpace rs, size_t actual_size, size_t commit_size); // Destruction ~G1PageBasedVirtualSpace(); --- old/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp 2015-02-03 11:18:17.230393487 +0100 +++ new/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.cpp 2015-02-03 11:18:17.161391494 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ #include "utilities/bitMap.inline.hpp" G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs, + size_t actual_size, size_t commit_granularity, size_t region_granularity, MemoryType type) : @@ -41,7 +42,7 @@ _commit_map() { guarantee(is_power_of_2(commit_granularity), "must be"); guarantee(is_power_of_2(region_granularity), "must be"); - _storage.initialize_with_granularity(rs, commit_granularity); + _storage.initialize_with_granularity(rs, actual_size, commit_granularity); MemTracker::record_virtual_memory_type((address)rs.base(), type); } @@ -55,11 +56,12 @@ public: G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs, + size_t actual_size, size_t os_commit_granularity, size_t alloc_granularity, size_t commit_factor, MemoryType type) : - G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type), + G1RegionToSpaceMapper(rs, actual_size, os_commit_granularity, alloc_granularity, type), _pages_per_region(alloc_granularity / (os_commit_granularity * commit_factor)) { guarantee(alloc_granularity >= os_commit_granularity, "allocation granularity smaller than commit granularity"); @@ -98,15 +100,16 @@ public: G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs, + size_t actual_size, size_t os_commit_granularity, size_t alloc_granularity, size_t commit_factor, MemoryType type) : - G1RegionToSpaceMapper(rs, os_commit_granularity, alloc_granularity, type), + G1RegionToSpaceMapper(rs, actual_size, os_commit_granularity, alloc_granularity, type), _regions_per_page((os_commit_granularity * commit_factor) / alloc_granularity), _refcounts() { guarantee((os_commit_granularity * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity"); - _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + rs.size()), os_commit_granularity); + _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + 
align_size_up(rs.size(), os_commit_granularity), os_commit_granularity); _commit_map.resize(rs.size() * commit_factor / alloc_granularity, /* in_resource_area */ false); } @@ -147,14 +150,15 @@ } G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs, + size_t actual_size, size_t os_commit_granularity, size_t region_granularity, size_t commit_factor, MemoryType type) { if (region_granularity >= (os_commit_granularity * commit_factor)) { - return new G1RegionsLargerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type); + return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, os_commit_granularity, region_granularity, commit_factor, type); } else { - return new G1RegionsSmallerThanCommitSizeMapper(rs, os_commit_granularity, region_granularity, commit_factor, type); + return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, os_commit_granularity, region_granularity, commit_factor, type); } } --- old/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp 2015-02-03 11:18:17.617404662 +0100 +++ new/src/share/vm/gc_implementation/g1/g1RegionToSpaceMapper.hpp 2015-02-03 11:18:17.545402583 +0100 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -51,7 +51,7 @@ // Mapping management BitMap _commit_map; - G1RegionToSpaceMapper(ReservedSpace rs, size_t commit_granularity, size_t region_granularity, MemoryType type); + G1RegionToSpaceMapper(ReservedSpace rs, size_t actual_size, size_t commit_granularity, size_t region_granularity, MemoryType type); void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled); public: @@ -71,11 +71,15 @@ virtual void uncommit_regions(uintptr_t start_idx, size_t num_regions = 1) = 0; // Creates an appropriate G1RegionToSpaceMapper for the given parameters. + // The actual space to be used within the given reservation is given by actual_size. + // This is because some OSes need to round up the reservation size to guarantee + // alignment of os_commit_granularity. // The byte_translation_factor defines how many bytes in a region correspond to // a single byte in the data structure this mapper is for. // Eg. in the card table, this value corresponds to the size a single card - // table entry corresponds to. + // table entry corresponds to in the heap. 
static G1RegionToSpaceMapper* create_mapper(ReservedSpace rs, + size_t actual_size, size_t os_commit_granularity, size_t region_granularity, size_t byte_translation_factor, --- old/src/share/vm/gc_implementation/g1/heapRegionSet.cpp 2015-02-03 11:18:17.997415635 +0100 +++ new/src/share/vm/gc_implementation/g1/heapRegionSet.cpp 2015-02-03 11:18:17.926413585 +0100 @@ -420,6 +420,7 @@ ReservedSpace bot_rs(G1BlockOffsetSharedArray::compute_size(heap.word_size())); G1RegionToSpaceMapper* bot_storage = G1RegionToSpaceMapper::create_mapper(bot_rs, + bot_rs.size(), os::vm_page_size(), HeapRegion::GrainBytes, G1BlockOffsetSharedArray::N_bytes, --- old/src/share/vm/prims/whitebox.cpp 2015-02-03 11:18:18.379426666 +0100 +++ new/src/share/vm/prims/whitebox.cpp 2015-02-03 11:18:18.308424616 +0100 @@ -89,6 +89,10 @@ return os::vm_page_size(); WB_END +WB_ENTRY(jlong, WB_GetVMLargePageSize(JNIEnv* env, jobject o)) + return os::large_page_size(); +WB_END + class WBIsKlassAliveClosure : public KlassClosure { Symbol* _name; bool _found; @@ -1222,6 +1226,7 @@ {CC"isObjectInOldGen", CC"(Ljava/lang/Object;)Z", (void*)&WB_isObjectInOldGen }, {CC"getHeapOopSize", CC"()I", (void*)&WB_GetHeapOopSize }, {CC"getVMPageSize", CC"()I", (void*)&WB_GetVMPageSize }, + {CC"getVMLargePageSize", CC"()J", (void*)&WB_GetVMLargePageSize}, {CC"isClassAlive0", CC"(Ljava/lang/String;)Z", (void*)&WB_IsClassAlive }, {CC"parseCommandLine", CC"(Ljava/lang/String;C[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;", --- /dev/null 2015-01-13 09:38:10.268752718 +0100 +++ new/test/gc/g1/TestLargePageUseForAuxMemory.java 2015-02-03 11:18:18.697435849 +0100 @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test TestLargePageUseForAuxMemory.java + * @bug 8058354 + * @key gc + * @library /testlibrary /../../test/lib + * @requires (vm.gc=="G1" | vm.gc=="null") + * @build TestLargePageUseForAuxMemory + * @run main ClassFileInstaller sun.hotspot.WhiteBox + * sun.hotspot.WhiteBox$WhiteBoxPermission + * @summary Test that auxiliary data structures are allocated using large pages if available. + * @run main/othervm -Xbootclasspath/a:. 
-XX:+UseG1GC -XX:+WhiteBoxAPI -XX:+IgnoreUnrecognizedVMOptions -XX:+UseLargePages TestLargePageUseForAuxMemory + */ + +import com.oracle.java.testlibrary.*; +import sun.hotspot.WhiteBox; + +public class TestLargePageUseForAuxMemory { + static final int HEAP_REGION_SIZE = 4 * 1024 * 1024; + static long largePageSize; + static long smallPageSize; + + static void checkSmallTables(OutputAnalyzer output, long expectedPageSize) throws Exception { + output.shouldContain("G1 'Block offset table': pg_sz=" + expectedPageSize); + output.shouldContain("G1 'Card counts table': pg_sz=" + expectedPageSize); + } + + static void checkBitmaps(OutputAnalyzer output, long expectedPageSize) throws Exception { + output.shouldContain("G1 'Prev Bitmap': pg_sz=" + expectedPageSize); + output.shouldContain("G1 'Next Bitmap': pg_sz=" + expectedPageSize); + } + + static void testVM(long heapsize, boolean cardsShouldUseLargePages, boolean bitmapShouldUseLargePages) throws Exception { + ProcessBuilder pb; + // Test with large page enabled. + pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", + "-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE, + "-Xms" + 10 * HEAP_REGION_SIZE, + "-Xmx" + heapsize, + "-XX:+TracePageSizes", + "-XX:+UseLargePages", + "-XX:+IgnoreUnrecognizedVMOptions", // there is no ObjectAlignmentInBytes in 32 bit builds + "-XX:ObjectAlignmentInBytes=8", + "-version"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + checkSmallTables(output, (cardsShouldUseLargePages ? largePageSize : smallPageSize)); + checkBitmaps(output, (bitmapShouldUseLargePages ? largePageSize : smallPageSize)); + output.shouldHaveExitValue(0); + + // Test with large page disabled. 
+ pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", + "-XX:G1HeapRegionSize=" + HEAP_REGION_SIZE, + "-Xms" + 10 * HEAP_REGION_SIZE, + "-Xmx" + heapsize, + "-XX:+TracePageSizes", + "-XX:-UseLargePages", + "-XX:+IgnoreUnrecognizedVMOptions", // there is no ObjectAlignmentInBytes in 32 bit builds + "-XX:ObjectAlignmentInBytes=8", + "-version"); + + output = new OutputAnalyzer(pb.start()); + checkSmallTables(output, smallPageSize); + checkBitmaps(output, smallPageSize); + output.shouldHaveExitValue(0); + } + + public static void main(String[] args) throws Exception { + if (!Platform.isDebugBuild()) { + System.out.println("Skip tests on non-debug builds because the required option TracePageSizes is a debug-only option."); + return; + } + + WhiteBox wb = WhiteBox.getWhiteBox(); + smallPageSize = wb.getVMPageSize(); + largePageSize = wb.getVMLargePageSize(); + + if (largePageSize == 0) { + System.out.println("Skip tests because large page support does not seem to be available on this platform."); + return; + } + + // To get large pages for the card table etc. we need at least a 1G heap (with 4k page size). + // 32 bit systems will have problems reserving such an amount of contiguous space, so skip the + // test there. + if (!Platform.is32bit()) { + // Size that a single card covers. + final int cardSize = 512; + + final long heapSizeForCardTableUsingLargePages = largePageSize * cardSize; + + testVM(heapSizeForCardTableUsingLargePages, true, true); + testVM(heapSizeForCardTableUsingLargePages + HEAP_REGION_SIZE, true, true); + testVM(heapSizeForCardTableUsingLargePages - HEAP_REGION_SIZE, false, true); + } + + // Minimum heap requirement to get large pages for bitmaps is 128M heap. This seems okay to test + // everywhere. 
+ final int bitmapTranslationFactor = 8 * 8; // ObjectAlignmentInBytes * BitsPerByte + final long heapSizeForBitmapUsingLargePages = largePageSize * bitmapTranslationFactor; + + testVM(heapSizeForBitmapUsingLargePages, false, true); + testVM(heapSizeForBitmapUsingLargePages + HEAP_REGION_SIZE, false, true); + testVM(heapSizeForBitmapUsingLargePages - HEAP_REGION_SIZE, false, false); + } +} +