src/share/vm/runtime/virtualspace.cpp
rev 7386 : 8064457: Introduce compressed oops mode disjoint base and improve compressed heap handling.
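For orientation, a minimal sketch of what the new "disjoint base" mode buys; this is illustrative only, not code from the patch, and decode_disjoint is a made-up name:

    // Hypothetical decode for disjoint base mode. When the heap base is a
    // multiple of OopEncodingHeapMax, the base bits and the shifted
    // narrow-oop bits occupy disjoint bit ranges, so '|' equals '+' and
    // can be emitted more cheaply (e.g. as a bit-insert) on some targets.
    static inline uintptr_t decode_disjoint(uint32_t narrow_oop,
                                            uintptr_t base,  // multiple of OopEncodingHeapMax
                                            int shift) {     // LogMinObjAlignmentInBytes
      return base | ((uintptr_t)narrow_oop << shift);
    }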
*** 41,65 ****
size_t page_size = os::page_size_for_region(size, 1);
bool large_pages = page_size != (size_t)os::vm_page_size();
// Don't force the alignment to be large page aligned,
// since that will waste memory.
size_t alignment = os::vm_allocation_granularity();
! initialize(size, alignment, large_pages, NULL, 0, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
! char* requested_address,
! const size_t noaccess_prefix) {
! initialize(size+noaccess_prefix, alignment, large, requested_address,
! noaccess_prefix, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
bool executable) {
! initialize(size, alignment, large, NULL, 0, executable);
}
// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
const size_t size, bool special)
--- 41,63 ----
size_t page_size = os::page_size_for_region(size, 1);
bool large_pages = page_size != (size_t)os::vm_page_size();
// Don't force the alignment to be large page aligned,
// since that will waste memory.
size_t alignment = os::vm_allocation_granularity();
! initialize(size, alignment, large_pages, NULL, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
! char* requested_address) {
! initialize(size, alignment, large, requested_address, false);
}
ReservedSpace::ReservedSpace(size_t size, size_t alignment,
bool large,
bool executable) {
! initialize(size, alignment, large, NULL, executable);
}
// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
const size_t size, bool special)
*** 89,99 ****
return true;
}
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
char* requested_address,
- const size_t noaccess_prefix,
bool executable) {
const size_t granularity = os::vm_allocation_granularity();
assert((size & (granularity - 1)) == 0,
"size not aligned to os::vm_allocation_granularity()");
assert((alignment & (granularity - 1)) == 0,
--- 87,96 ----
*** 101,114 ****
assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
"not a power of 2");
alignment = MAX2(alignment, (size_t)os::vm_page_size());
- // Assert that if noaccess_prefix is used, it is the same as alignment.
- assert(noaccess_prefix == 0 ||
- noaccess_prefix == alignment, "noaccess prefix wrong");
-
_base = NULL;
_size = 0;
_special = false;
_executable = executable;
_alignment = 0;
--- 98,107 ----
*** 120,134 ****
// If OS doesn't support demand paging for large page memory, we need
// to use reserve_memory_special() to reserve and pin the entire region.
bool special = large && !os::can_commit_large_page_memory();
char* base = NULL;
- if (requested_address != 0) {
- requested_address -= noaccess_prefix; // adjust requested address
- assert(requested_address != NULL, "huge noaccess prefix?");
- }
-
if (special) {
base = os::reserve_memory_special(size, alignment, requested_address, executable);
if (base != NULL) {
--- 113,122 ----
*** 174,184 ****
}
if (base == NULL) return;
// Check alignment constraints
! if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
// Base not aligned, retry
if (!os::release_memory(base, size)) fatal("os::release_memory failed");
// Make sure that size is aligned
size = align_size_up(size, alignment);
base = os::reserve_memory_aligned(size, alignment);
--- 162,172 ----
}
if (base == NULL) return;
// Check alignment constraints
! if ((((size_t)base) & (alignment - 1)) != 0) {
// Base not aligned, retry
if (!os::release_memory(base, size)) fatal("os::release_memory failed");
// Make sure that size is aligned
size = align_size_up(size, alignment);
base = os::reserve_memory_aligned(size, alignment);
*** 195,209 ****
}
// Done
_base = base;
_size = size;
_alignment = alignment;
- _noaccess_prefix = noaccess_prefix;
-
- // Assert that if noaccess_prefix is used, it is the same as alignment.
- assert(noaccess_prefix == 0 ||
- noaccess_prefix == _alignment, "noaccess prefix wrong");
assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
"area must be distinguishable from marks for mark-sweep");
assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
"area must be distinguishable from marks for mark-sweep");
--- 183,192 ----
*** 279,331 ****
_special = false;
_executable = false;
}
}
! void ReservedSpace::protect_noaccess_prefix(const size_t size) {
! assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
! (Universe::narrow_oop_base() != NULL) &&
! Universe::narrow_oop_use_implicit_null_checks()),
! "noaccess_prefix should be used only with non zero based compressed oops");
! // If there is no noaccess prefix, return.
! if (_noaccess_prefix == 0) return;
! assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
! "must be at least page size big");
// Protect memory at the base of the allocated region.
// If special, the page was committed (only matters on windows)
! if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
! _special)) {
fatal("cannot protect protection page");
}
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
}
_base += _noaccess_prefix;
_size -= _noaccess_prefix;
! assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
! "must be exactly of required size and alignment");
}
! ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
! bool large, char* requested_address) :
! ReservedSpace(size, alignment, large,
! requested_address,
! (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
! Universe::narrow_oop_use_implicit_null_checks()) ?
! lcm(os::vm_page_size(), alignment) : 0) {
if (base() > 0) {
MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
}
-
- // Only reserved space for the java heap should have a noaccess_prefix
- // if using compressed oops.
- protect_noaccess_prefix(size);
}
// Reserve space for code segment. Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
--- 262,560 ----
_special = false;
_executable = false;
}
}
! static size_t noaccess_prefix_size(size_t alignment) {
! return lcm(os::vm_page_size(), alignment);
! }
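A worked example of noaccess_prefix_size(), with assumed values; the lcm makes the prefix the smallest size that is both page- and alignment-granular, so the whole prefix can be protected while the shifted _base stays aligned:

    //   os::vm_page_size() = 4K,  alignment = 64K  ->  prefix = lcm(4K, 64K) = 64K
    //   os::vm_page_size() = 64K, alignment = 2M   ->  prefix = lcm(64K, 2M) = 2M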
! void ReservedSpace::establish_noaccess_prefix() {
! assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
! // ...
! _noaccess_prefix = noaccess_prefix_size(_alignment);
+ if (true
+ WIN64_ONLY(&& !UseLargePages)
+ AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
// Protect memory at the base of the allocated region.
// If special, the page was committed (only matters on windows)
! if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
fatal("cannot protect protection page");
}
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
}
+ assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
+ } else {
+ Universe::set_narrow_oop_use_implicit_null_checks(false);
+ }
_base += _noaccess_prefix;
_size -= _noaccess_prefix;
! assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}
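Why the protected prefix yields implicit null checks, as a hedged sketch assuming the usual add-based decode (names illustrative, not HotSpot API):

    //   uintptr_t decode(uint32_t narrow_oop, uintptr_t base, int shift) {
    //     return base + ((uintptr_t)narrow_oop << shift);  // no explicit null test
    //   }
    // A null narrow oop decodes to the compressed-oops base, which points into
    // the MEM_PROT_NONE page protected above, so the first access through it
    // raises SIGSEGV; the VM's fault handler turns that into the
    // NullPointerException an explicit null check would have thrown.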
!
! // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
! // Does not check whether the reserved memory is actually at requested_address, as the memory
! // returned might still fulfill the caller's wishes.
! // Ensures the memory is aligned to 'alignment'.
! // NOTE: If ReservedHeapSpace already points to some reserved memory, this is released first.
! void ReservedHeapSpace::try_reserve_heap(size_t size, size_t alignment, bool large, char* requested_address) {
! if (_base != NULL) {
! // We tried before, but we didn't like the address delivered.
! release();
! }
!
! // If OS doesn't support demand paging for large page memory, we need
! // to use reserve_memory_special() to reserve and pin the entire region.
! bool special = large && !os::can_commit_large_page_memory();
! char* base = NULL;
!
! if (PrintCompressedOopsMode && Verbose) {
! tty->print("Trying to allocate at address " PTR_FORMAT " size" PTR_FORMAT ".\n",
! requested_address, (address)size);
! }
!
! if (special) {
! base = os::reserve_memory_special(size, alignment, requested_address, false);
!
! if (base != NULL) {
! // Check alignment constraints.
! assert((uintptr_t) base % alignment == 0,
! err_msg("Large pages returned a non-aligned address, base: "
! PTR_FORMAT " alignment: " PTR_FORMAT,
! base, (void*)(uintptr_t)alignment));
! _special = true;
! }
! }
!
! if (!base) {
! // Failed; try to reserve regular memory below
! if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
! !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
! if (PrintCompressedOopsMode) {
! tty->cr();
! tty->print_cr("Reserve regular memory without large pages.");
! }
! }
!
! // Optimistically assume that the OS returns an aligned base pointer.
! // When reserving a large address range, most OSes seem to align to at
! // least 64K.
!
! // If the memory was requested at a particular address, use
! // os::attempt_reserve_memory_at() to avoid mapping over something
! // important. If available space is not detected, return NULL.
!
! if (requested_address != 0) {
! base = os::attempt_reserve_memory_at(size, requested_address);
! } else {
! base = os::reserve_memory(size, NULL, alignment);
! }
! }
! if (base == NULL) return;
!
! // Done
! _base = base;
! _size = size;
! _alignment = alignment;
!
! // Check alignment constraints
! if ((((size_t)base) & (alignment - 1)) != 0) {
! // Base not aligned, retry.
! release();
! return;
! }
! }
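A hypothetical caller pattern, making the contract above concrete; address_is_acceptable stands in for whatever test the caller applies:

    //   try_reserve_heap(size, alignment, large, wish_address);
    //   if (_base != NULL && !address_is_acceptable(_base)) {
    //     // the next call releases the previous mapping before retrying
    //     try_reserve_heap(size, alignment, large, next_wish_address);
    //   }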
!
! void ReservedHeapSpace::initialize_compressed_heap(size_t size, size_t alignment, bool large) {
! guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
! "can not allocate compressed oop heap for this size");
! guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
! assert(HeapBaseMinAddress > 0, "sanity");
!
! const size_t granularity = os::vm_allocation_granularity();
! assert((size & (granularity - 1)) == 0,
! "size not aligned to os::vm_allocation_granularity()");
! assert((alignment & (granularity - 1)) == 0,
! "alignment not aligned to os::vm_allocation_granularity()");
! assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
! "not a power of 2");
!
! // The necessary attach point alignment for generated wish addresses.
! // This is needed to increase the chance of attaching for mmap and shmat.
! const size_t os_attach_point_alignment =
! AIX_ONLY(SIZE_256M) // Known shm boundary alignment.
! NOT_AIX(os::vm_allocation_granularity());
! const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
!
! guarantee(HeapSearchSteps > 0, "Don't set HeapSearchSteps to 0");
! const uint64_t num_attempts = HeapSearchSteps;
!
! char *aligned_HBMA = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
! size_t noaccess_prefix = ((aligned_HBMA + size) > (char*)OopEncodingHeapMax) ? noaccess_prefix_size(alignment) : 0;
!
! // Attempt to alloc at user-given address.
! if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
! if (PrintCompressedOopsMode && Verbose) {
! tty->print(" == H E A P B A S E M I N A D D R E S S ==\n");
! }
! try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_HBMA);
! if (_base != aligned_HBMA) { // Enforce this exact address.
! release();
! }
! }
!
! // Keep heap at HeapBaseMinAddress.
! if (!_base) {
!
! if (PrintCompressedOopsMode && Verbose) {
! tty->print(" == U N S C A L E D ==\n");
! }
!
! // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
! // Give it several tries from top of range to bottom.
! if (aligned_HBMA + size <= (char *)UnscaledOopHeapMax) {
!
! // Calc address range within which we try to attach (range of possible start addresses).
! char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
! char* const lowest_start = (char *)align_ptr_up ( aligned_HBMA , attach_point_alignment);
! const size_t attach_range = highest_start - lowest_start;
!
! // Cap num_attempts at possible number.
! const uint64_t num_attempts_possible =
! (attach_range / attach_point_alignment) + 1; // At least one is possible even for 0 sized attach range.
! const uint64_t num_attempts_to_try = MIN2(num_attempts, num_attempts_possible);
!
! const size_t stepsize = align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
!
! // Try attach points from top to bottom.
! char* attach_point = highest_start;
! while (attach_point >= lowest_start &&
! attach_point <= highest_start && // Avoid wrap around.
! (!_base || _base < aligned_HBMA || _base + size > (char *)UnscaledOopHeapMax)) {
! try_reserve_heap(size, alignment, large, attach_point);
! attach_point -= stepsize;
! }
!
! }
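A worked example of the stepping above, with assumed numbers (HeapSearchSteps = 3):

    //   UnscaledOopHeapMax = 4G, size = 1G, aligned_HBMA = 2G, attach_point_alignment = 64K
    //   highest_start = align_down(4G - 1G, 64K) = 3G
    //   lowest_start  = align_up(2G, 64K)        = 2G
    //   attach_range  = 1G
    //   stepsize      = align_up(1G / 3, 64K)    = 0x15560000 (~341M)
    //   attach points tried: 3G, 3G - stepsize, 3G - 2*stepsize (all >= 2G),
    //   stopping as soon as a try yields _base >= 2G and _base + 1G <= 4G.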
!
! if (PrintCompressedOopsMode && Verbose) {
! tty->print(" == Z E R O B A S E D ==\n");
! }
!
! // zerobased: Attempt to allocate in the lower 32G.
! // But leave room for the compressed class pointer space, which is
! // allocated above the heap.
! char *zerobased_max = (char *)OopEncodingHeapMax;
! // For small heaps, save some space for compressed class pointer
! // space so it can be decoded with no base.
! if (UseCompressedClassPointers && !UseSharedSpaces &&
! OopEncodingHeapMax <= KlassEncodingMetaspaceMax) {
! const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
! zerobased_max = (char *)OopEncodingHeapMax - class_space;
! }
!
! // Give it several tries from top of range to bottom.
! if (aligned_HBMA + size <= zerobased_max && // Zerobased theoretical possible.
! (!_base || // No previous try succeeded.
! (_base && _base + size > zerobased_max))) { // Unscaled delivered an arbitrary address.
!
! // Calc address range within which we try to attach (range of possible start addresses).
! char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
! // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
! // "Cannot use int to initialize char*." Introduce aux variable.
! char *unscaled_end = (char *)UnscaledOopHeapMax;
! unscaled_end -= size;
! char *lowest_start = (size < UnscaledOopHeapMax) ? MAX2(unscaled_end, aligned_HBMA) : aligned_HBMA;
! lowest_start = (char *)align_ptr_up(lowest_start, attach_point_alignment);
! const size_t attach_range = highest_start - lowest_start;
!
! // Cap num_attempts at possible number.
! const uint64_t num_attempts_possible =
! (attach_range / attach_point_alignment) + 1; // At least one is possible even for 0 sized attach range.
! const uint64_t num_attempts_to_try = MIN2(num_attempts, num_attempts_possible);
!
! const size_t stepsize = align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
!
! // Try attach points from top to bottom.
! char* attach_point = highest_start;
! while (attach_point >= lowest_start &&
! attach_point <= highest_start && // Avoid wrap around.
! (!_base || _base < aligned_HBMA || _base + size > zerobased_max)) {
! try_reserve_heap(size, alignment, large, attach_point);
! attach_point -= stepsize;
! }
!
! }
!
! if (PrintCompressedOopsMode && Verbose) {
! tty->print(" == D I S J O I N T B A S E ==\n");
! }
!
! // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
! // implement null checks.
! noaccess_prefix = noaccess_prefix_size(alignment);
!
! // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
! char** addresses = Universe::get_attach_addresses_for_disjoint_mode();
! int i = 0;
! while (addresses[i] &&
! (!_base ||
! (_base && _base + size > (char *)OopEncodingHeapMax &&
! !Universe::is_disjoint_heap_base_address((address)_base)))) {
! char* const attach_point = addresses[i];
! assert(attach_point >= aligned_HBMA, "Flag support broken");
! try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
! i++;
! }
!
! if (PrintCompressedOopsMode && Verbose) {
! tty->print(" == H E A P B A S E D ==\n");
! }
!
! // Last, desperate try without any placement.
! if (!_base) {
! if (PrintCompressedOopsMode && Verbose) {
! tty->print("Trying to allocate at address NULL size" PTR_FORMAT ".\n", (address)size);
! }
! initialize(size + noaccess_prefix, alignment, large, NULL, false);
! }
! }
!
! assert(!_base || markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
! "area must be distinguishable from marks for mark-sweep");
! assert(!_base || markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
! "area must be distinguishable from marks for mark-sweep");
! }
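In outline, the placement ladder the function above walks; an editor's summary of the code, assuming default 8-byte object alignment (UnscaledOopHeapMax = 4G, OopEncodingHeapMax = 32G):

    //   1. HeapBaseMinAddress - only if set on the command line; the exact address is enforced.
    //   2. UNSCALED           - heap ends below 4G: decode needs neither base nor shift.
    //   3. ZEROBASED          - heap ends below 32G (minus class space, if reserved): shift only.
    //   4. DISJOINT BASE      - base aligned to OopEncodingHeapMax; needs a noaccess prefix.
    //   5. HEAP BASED         - plain initialize() anywhere; needs a noaccess prefix.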
!
! ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
!
! if (size == 0) {
! return;
! }
!
! // Heap size should be aligned to alignment, too.
! guarantee(is_size_aligned(size, alignment), "set by caller");
!
! if (UseCompressedOops) {
! initialize_compressed_heap(size, alignment, large);
! if (base() && base() + size > (char *)OopEncodingHeapMax) {
! establish_noaccess_prefix();
! }
!
! } else {
! initialize(size, alignment, large, NULL, false);
! }
!
if (base() > 0) {
MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
}
}
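A hedged example of the prefix trigger in the constructor above, with assumed sizes:

    //   base() = 1G,  size = 20G -> ends at 21G <= 32G: no prefix (zerobased decode works)
    //   base() = 32G, size = 20G -> ends at 52G >  32G: establish_noaccess_prefix()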
// Reserve space for code segment. Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
*** 801,812 ****
assert(is_size_aligned(size, alignment), "Incorrect input parameters");
ReservedSpace rs(size, // size
alignment, // alignment
UseLargePages, // large
! NULL, // requested_address
! 0); // noaccess_prefix
test_log(" rs.special() == %d", rs.special());
assert(rs.base() != NULL, "Must be");
assert(rs.size() == size, "Must be");
--- 1030,1040 ----
assert(is_size_aligned(size, alignment), "Incorrect input parameters");
ReservedSpace rs(size, // size
alignment, // alignment
UseLargePages, // large
! (char *)NULL); // requested_address
test_log(" rs.special() == %d", rs.special());
assert(rs.base() != NULL, "Must be");
assert(rs.size() == size, "Must be");