src/share/vm/memory/virtualspace.cpp

*** 33,46 ****
  // ReservedSpace
  
  // Dummy constructor
  ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
!     _alignment(0), _special(false), _executable(false) {
  }
  
! ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
    bool has_preferred_page_size = preferred_page_size != 0;
    // Want to use large pages where possible and pad with small pages.
    size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
    bool large_pages = page_size != (size_t)os::vm_page_size();
    size_t alignment;
--- 33,46 ----
  // ReservedSpace
  
  // Dummy constructor
  ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
!     _alignment(0), _special(false), _executable(false), _backing_fd(-1) {
  }
  
! ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) {
    bool has_preferred_page_size = preferred_page_size != 0;
    // Want to use large pages where possible and pad with small pages.
    size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
    bool large_pages = page_size != (size_t)os::vm_page_size();
    size_t alignment;
*** 57,73 ****
    initialize(size, alignment, large_pages, NULL, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              char* requested_address) {
    initialize(size, alignment, large, requested_address, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              bool executable) {
    initialize(size, alignment, large, NULL, executable);
  }
  
  // Helper method.
  static bool failed_to_reserve_as_requested(char* base, char* requested_address,
--- 57,73 ----
    initialize(size, alignment, large_pages, NULL, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              char* requested_address) : _backing_fd(-1) {
    initialize(size, alignment, large, requested_address, false);
  }
  
  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              bool executable) : _backing_fd(-1) {
    initialize(size, alignment, large, NULL, executable);
  }
  
  // Helper method.
  static bool failed_to_reserve_as_requested(char* base, char* requested_address,
*** 118,128 ****
      return;
    }
  
    // If OS doesn't support demand paging for large page memory, we need
    // to use reserve_memory_special() to reserve and pin the entire region.
!   bool special = large && !os::can_commit_large_page_memory();
    char* base = NULL;
  
    if (special) {
  
      base = os::reserve_memory_special(size, alignment, requested_address, executable);
--- 118,130 ----
      return;
    }
  
    // If OS doesn't support demand paging for large page memory, we need
    // to use reserve_memory_special() to reserve and pin the entire region.
!   // If there is a backing file directory for this VirtualSpace, then whether large pages are allocated is up to the file system the directory resides in.
!   // So we ignore the UseLargePages flag in this case.
!   bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
    char* base = NULL;
  
    if (special) {
  
      base = os::reserve_memory_special(size, alignment, requested_address, executable);
*** 155,182 ****
      // If the memory was requested at a particular address, use
      // os::attempt_reserve_memory_at() to avoid over mapping something
      // important.  If available space is not detected, return NULL.
      if (requested_address != 0) {
!       base = os::attempt_reserve_memory_at(size, requested_address);
        if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
          // OS ignored requested address. Try different address.
          base = NULL;
        }
      } else {
!       base = os::reserve_memory(size, NULL, alignment);
      }
    }
  
    if (base == NULL) return;
  
    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
!     base = os::reserve_memory_aligned(size, alignment);
  
      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
--- 157,189 ----
      // If the memory was requested at a particular address, use
      // os::attempt_reserve_memory_at() to avoid over mapping something
      // important.  If available space is not detected, return NULL.
      if (requested_address != 0) {
!       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
        if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
          // OS ignored requested address. Try different address.
          base = NULL;
        }
      } else {
!       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
      }
    }
  
    if (base == NULL) return;
  
    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
+     if (_backing_fd != -1) {
+       if (!os::unmap_memory(base, size)) fatal("os::release_memory failed");
+     }
+     else {
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+     }
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
!     base = os::reserve_memory_aligned(size, alignment, _backing_fd);
  
      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
*** 188,197 ****
--- 195,208 ----
    }
  
    // Done
    _base = base;
    _size = size;
    _alignment = alignment;
+   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true.
+   if (_backing_fd != -1) {
+     _special = true;
+   }
  }
  
  ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                               bool special, bool executable) {
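
Side note (not part of the patch): setting _special when a backing file is present reflects that a file-backed mapping gets its storage from the file system at reservation time, so there is no separate commit step. The following is a minimal POSIX sketch of that general technique; the helper name, path handling, and sizes are assumptions for illustration only, not HotSpot code.

  // Minimal POSIX sketch (not HotSpot code): reserve memory backed by a file.
  // The helper name and error handling are illustrative assumptions.
  #include <cstddef>
  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  static void* reserve_file_backed(const char* path, size_t size) {
    int fd = open(path, O_RDWR | O_CREAT, 0600);
    if (fd == -1) return NULL;
    if (ftruncate(fd, (off_t)size) == -1) {   // size the backing file up front
      close(fd);
      return NULL;
    }
    // MAP_SHARED against the fd: the file system supplies the backing store,
    // so the whole range is usable immediately; there is no separate commit
    // step as with an anonymous reservation.
    void* base = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);                                // the mapping keeps the file in use
    return (base == MAP_FAILED) ? NULL : base;
  }
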
*** 311,321 ****
      release();
    }
  
    // If OS doesn't support demand paging for large page memory, we need
    // to use reserve_memory_special() to reserve and pin the entire region.
!   bool special = large && !os::can_commit_large_page_memory();
    char* base = NULL;
  
    log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                               " heap of size " SIZE_FORMAT_HEX,
                               p2i(requested_address),
--- 322,334 ----
      release();
    }
  
    // If OS doesn't support demand paging for large page memory, we need
    // to use reserve_memory_special() to reserve and pin the entire region.
!   // If there is a backing file directory for this VirtualSpace, then whether large pages are allocated is up to the file system the directory resides in.
!   // So we ignore the UseLargePages flag in this case.
!   bool special = (_backing_fd == -1) && (large && !os::can_commit_large_page_memory());
    char* base = NULL;
  
    log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                               " heap of size " SIZE_FORMAT_HEX,
                               p2i(requested_address),
*** 348,368 ****
      // If the memory was requested at a particular address, use
      // os::attempt_reserve_memory_at() to avoid over mapping something
      // important.  If available space is not detected, return NULL.
  
      if (requested_address != 0) {
!       base = os::attempt_reserve_memory_at(size, requested_address);
      } else {
!       base = os::reserve_memory(size, NULL, alignment);
      }
    }
    if (base == NULL) { return; }
  
    // Done
    _base = base;
    _size = size;
    _alignment = alignment;
  
    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      release();
--- 361,385 ----
      // If the memory was requested at a particular address, use
      // os::attempt_reserve_memory_at() to avoid over mapping something
      // important.  If available space is not detected, return NULL.
  
      if (requested_address != 0) {
!       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
      } else {
!       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
      }
    }
    if (base == NULL) { return; }
  
    // Done
    _base = base;
    _size = size;
    _alignment = alignment;
+   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true.
+   if (_backing_fd != -1) {
+     _special = true;
+   }
  
    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      release();
*** 554,569 ****
        initialize(size + noaccess_prefix, alignment, large, NULL, false);
      }
    }
  }
  
! ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
  
    if (size == 0) {
      return;
    }
  
    // Heap size should be aligned to alignment, too.
    guarantee(is_size_aligned(size, alignment), "set by caller");
  
    if (UseCompressedOops) {
      initialize_compressed_heap(size, alignment, large);
--- 571,590 ----
        initialize(size + noaccess_prefix, alignment, large, NULL, false);
      }
    }
  }
  
! ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {
  
    if (size == 0) {
      return;
    }
  
+   if (backingFSforHeap != NULL) {
+     _backing_fd = os::create_file_for_heap(backingFSforHeap, size);
+   }
+ 
    // Heap size should be aligned to alignment, too.
    guarantee(is_size_aligned(size, alignment), "set by caller");
  
    if (UseCompressedOops) {
      initialize_compressed_heap(size, alignment, large);
*** 583,592 ****
--- 604,617 ----
             "area must be distinguishable from marks for mark-sweep");
  
    if (base() > 0) {
      MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
    }
+ 
+   if (backingFSforHeap != NULL) {
+     os::close(_backing_fd);
+   }
  }
  
  // Reserve space for code segment.  Same as Java heap only we mark this as
  // executable.
  ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
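
Side note (not part of the patch): closing _backing_fd here is safe because the established mapping keeps the underlying file in use. A hedged caller-side sketch of the new four-argument constructor follows; the mount point, sizes, and error handling are assumptions for illustration, not code from this change.

  // Hypothetical caller-side sketch: place the Java heap on a DAX/tmpfs mount
  // by passing the directory to the new constructor. The mount point and the
  // sizes below are illustrative assumptions.
  size_t heap_size  = 4 * G;
  size_t heap_align = os::vm_allocation_granularity();
  ReservedHeapSpace heap_rs(heap_size, heap_align, UseLargePages, "/mnt/pmem0");
  if (heap_rs.base() == NULL) {
    vm_exit_during_initialization("Could not reserve file-backed Java heap");
  }
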