
src/share/vm/memory/virtualspace.cpp


*** 45,55 ****
    size_t alignment;
    if (large_pages && has_preferred_page_size) {
      alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
      // ReservedSpace initialization requires size to be aligned to the given
      // alignment. Align the size up.
!     size = align_size_up(size, alignment);
    } else {
      // Don't force the alignment to be large page aligned,
      // since that will waste memory.
      alignment = os::vm_allocation_granularity();
    }
--- 45,55 ----
    size_t alignment;
    if (large_pages && has_preferred_page_size) {
      alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
      // ReservedSpace initialization requires size to be aligned to the given
      // alignment. Align the size up.
!     size = align_up(size, alignment);
    } else {
      // Don't force the alignment to be large page aligned,
      // since that will waste memory.
      alignment = os::vm_allocation_granularity();
    }
*** 170,180 ****
      // Check alignment constraints
      if ((((size_t)base) & (alignment - 1)) != 0) {
        // Base not aligned, retry
        if (!os::release_memory(base, size)) fatal("os::release_memory failed");
        // Make sure that size is aligned
!       size = align_size_up(size, alignment);
        base = os::reserve_memory_aligned(size, alignment);

        if (requested_address != 0 &&
            failed_to_reserve_as_requested(base, requested_address, size, false)) {
          // As a result of the alignment constraints, the allocated base differs
--- 170,180 ----
      // Check alignment constraints
      if ((((size_t)base) & (alignment - 1)) != 0) {
        // Base not aligned, retry
        if (!os::release_memory(base, size)) fatal("os::release_memory failed");
        // Make sure that size is aligned
!       size = align_up(size, alignment);
        base = os::reserve_memory_aligned(size, alignment);

        if (requested_address != 0 &&
            failed_to_reserve_as_requested(base, requested_address, size, false)) {
          // As a result of the alignment constraints, the allocated base differs
*** 224,250 ****
                           alignment, special(), executable());
    return result;
  }


! size_t ReservedSpace::page_align_size_up(size_t size) {
!   return align_size_up(size, os::vm_page_size());
  }


! size_t ReservedSpace::page_align_size_down(size_t size) {
!   return align_size_down(size, os::vm_page_size());
  }


! size_t ReservedSpace::allocation_align_size_up(size_t size) {
!   return align_size_up(size, os::vm_allocation_granularity());
  }


! size_t ReservedSpace::allocation_align_size_down(size_t size) {
!   return align_size_down(size, os::vm_allocation_granularity());
  }


  void ReservedSpace::release() {
    if (is_reserved()) {
--- 224,250 ----
                           alignment, special(), executable());
    return result;
  }


! size_t ReservedSpace::page_align_up(size_t size) {
!   return align_up(size, os::vm_page_size());
  }


! size_t ReservedSpace::page_align_down(size_t size) {
!   return align_down(size, os::vm_page_size());
  }


! size_t ReservedSpace::allocation_align_up(size_t size) {
!   return align_up(size, os::vm_allocation_granularity());
  }


! size_t ReservedSpace::allocation_align_down(size_t size) {
!   return align_down(size, os::vm_allocation_granularity());
  }


  void ReservedSpace::release() {
    if (is_reserved()) {
*** 381,391 ****
    // At least one is possible even for 0 sized attach range.
    const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
    const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

    const size_t stepsize = (attach_range == 0) ? // Only one try.
!     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);

    // Try attach points from top to bottom.
    char* attach_point = highest_start;
    while (attach_point >= lowest_start  &&
           attach_point <= highest_start &&  // Avoid wrap around.
--- 381,391 ----
    // At least one is possible even for 0 sized attach range.
    const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
    const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

    const size_t stepsize = (attach_range == 0) ? // Only one try.
!     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);

    // Try attach points from top to bottom.
    char* attach_point = highest_start;
    while (attach_point >= lowest_start  &&
           attach_point <= highest_start &&  // Avoid wrap around.
*** 461,471 ****
    const size_t os_attach_point_alignment =
      AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
      NOT_AIX(os::vm_allocation_granularity());
    const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

!   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
    size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
      noaccess_prefix_size(alignment) : 0;

    // Attempt to alloc at user-given address.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
--- 461,471 ----
    const size_t os_attach_point_alignment =
      AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
      NOT_AIX(os::vm_allocation_granularity());
    const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

!   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
    size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
      noaccess_prefix_size(alignment) : 0;

    // Attempt to alloc at user-given address.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
*** 490,510 ****
      // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
      // Give it several tries from top of range to bottom.
      if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

        // Calc address range within we try to attach (range of possible start addresses).
!       char* const highest_start = align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
!       char* const lowest_start  = align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
        try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                          aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
      }

      // zerobased: Attempt to allocate in the lower 32G.
      // But leave room for the compressed class pointers, which is allocated above
      // the heap.
      char *zerobased_max = (char *)OopEncodingHeapMax;
!     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
      // For small heaps, save some space for compressed class pointer
      // space so it can be decoded with no base.
      if (UseCompressedClassPointers && !UseSharedSpaces &&
          OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
          (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
--- 490,510 ----
      // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
      // Give it several tries from top of range to bottom.
      if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

        // Calc address range within we try to attach (range of possible start addresses).
!       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
!       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
        try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                          aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
      }

      // zerobased: Attempt to allocate in the lower 32G.
      // But leave room for the compressed class pointers, which is allocated above
      // the heap.
      char *zerobased_max = (char *)OopEncodingHeapMax;
!     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
      // For small heaps, save some space for compressed class pointer
      // space so it can be decoded with no base.
      if (UseCompressedClassPointers && !UseSharedSpaces &&
          OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
          (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
*** 515,533 ****
      if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
          ((_base == NULL) ||                        // No previous try succeeded.
           (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.

        // Calc address range within we try to attach (range of possible start addresses).
!       char *const highest_start = align_ptr_down(zerobased_max - size, attach_point_alignment);
        // Need to be careful about size being guaranteed to be less
        // than UnscaledOopHeapMax due to type constraints.
        char *lowest_start = aligned_heap_base_min_address;
        uint64_t unscaled_end = UnscaledOopHeapMax - size;
        if (unscaled_end < UnscaledOopHeapMax) {  // unscaled_end wrapped if size is large
          lowest_start = MAX2(lowest_start, (char*)unscaled_end);
        }
!       lowest_start = align_ptr_up(lowest_start, attach_point_alignment);
        try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                          aligned_heap_base_min_address, zerobased_max, size, alignment, large);
      }

      // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
--- 515,533 ----
      if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
          ((_base == NULL) ||                        // No previous try succeeded.
           (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.

        // Calc address range within we try to attach (range of possible start addresses).
!       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
        // Need to be careful about size being guaranteed to be less
        // than UnscaledOopHeapMax due to type constraints.
        char *lowest_start = aligned_heap_base_min_address;
        uint64_t unscaled_end = UnscaledOopHeapMax - size;
        if (unscaled_end < UnscaledOopHeapMax) {  // unscaled_end wrapped if size is large
          lowest_start = MAX2(lowest_start, (char*)unscaled_end);
        }
!       lowest_start = align_up(lowest_start, attach_point_alignment);
        try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                          aligned_heap_base_min_address, zerobased_max, size, alignment, large);
      }

      // Now we go for heaps with base != 0. We need a noaccess prefix to efficiently
*** 560,570 ****
    if (size == 0) {
      return;
    }

    // Heap size should be aligned to alignment, too.
!   guarantee(is_size_aligned(size, alignment), "set by caller");

    if (UseCompressedOops) {
      initialize_compressed_heap(size, alignment, large);
      if (_size > size) {
        // We allocated heap with noaccess prefix.
--- 560,570 ----
    if (size == 0) {
      return;
    }

    // Heap size should be aligned to alignment, too.
!   guarantee(is_aligned(size, alignment), "set by caller");

    if (UseCompressedOops) {
      initialize_compressed_heap(size, alignment, large);
      if (_size > size) {
        // We allocated heap with noaccess prefix.
*** 749,760 ****
  bool VirtualSpace::contains(const void* p) const {
    return low() <= (const char*) p && (const char*) p < high();
  }

  static void pretouch_expanded_memory(void* start, void* end) {
!   assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
!   assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");

    os::pretouch_memory(start, end);
  }

  static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
--- 749,760 ----
  bool VirtualSpace::contains(const void* p) const {
    return low() <= (const char*) p && (const char*) p < high();
  }

  static void pretouch_expanded_memory(void* start, void* end) {
!   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
!   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");

    os::pretouch_memory(start, end);
  }

  static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
*** 1035,1045 ****
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

!   assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     (char *)NULL); // requested_address
--- 1035,1045 ----
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

!   assert(is_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     (char *)NULL); // requested_address
*** 1047,1058 ****
    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

!   assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
!   assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

--- 1047,1058 ----
    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

!   assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
!   assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

*** 1060,1070 ****
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

!   assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

--- 1060,1070 ----
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

!   assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

*** 1086,1097 ****
      // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
      assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
      return;
    }

!   assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
!   assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

--- 1086,1097 ----
      // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
      assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
      return;
    }

!   assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
!   assert(is_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

*** 1242,1252 ****

   public:
    static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                          TestLargePages mode = Default) {
      size_t granularity = os::vm_allocation_granularity();
!     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

      ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

      assert(reserved.is_reserved(), "Must be");

--- 1242,1252 ----

   public:
    static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                          TestLargePages mode = Default) {
      size_t granularity = os::vm_allocation_granularity();
!     size_t reserve_size_aligned = align_up(reserve_size, granularity);

      ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

      assert(reserved.is_reserved(), "Must be");

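
Throughout this file the patch replaces the size- and pointer-specific helpers (align_size_up, align_size_down, align_ptr_up, align_ptr_down, is_size_aligned, is_ptr_aligned) with the single family align_up, align_down and is_aligned, used for both size_t and address arguments. The standalone sketch below only illustrates the power-of-two alignment arithmetic these helpers perform; it is not HotSpot code, and the my_-prefixed names are hypothetical stand-ins.

// Illustrative sketch, assuming the alignment argument is a power of two.
#include <cassert>
#include <cstddef>
#include <cstdint>

template <typename T>
T my_align_down(T value, uintptr_t alignment) {
  // Clear the low bits: nearest multiple of alignment that is <= value.
  return (T)((uintptr_t)value & ~(alignment - 1));
}

template <typename T>
T my_align_up(T value, uintptr_t alignment) {
  // Round up to the nearest multiple of alignment that is >= value.
  return my_align_down((T)((uintptr_t)value + alignment - 1), alignment);
}

template <typename T>
bool my_is_aligned(T value, uintptr_t alignment) {
  return ((uintptr_t)value & (alignment - 1)) == 0;
}

int main() {
  const uintptr_t page = 4096;  // example page size, not queried from the OS
  assert(my_align_up((size_t)5000, page)   == 8192);
  assert(my_align_down((size_t)5000, page) == 4096);
  assert(my_is_aligned((size_t)8192, page));
  assert(!my_is_aligned((size_t)5000, page));
  return 0;
}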