src/share/vm/memory/virtualspace.cpp

*** 33,46 ****
  // ReservedSpace

  // Dummy constructor
  ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
!     _alignment(0), _special(false), _executable(false) {
  }

! ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
    bool has_preferred_page_size = preferred_page_size != 0;
    // Want to use large pages where possible and pad with small pages.
    size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
    bool large_pages = page_size != (size_t)os::vm_page_size();
    size_t alignment;
--- 33,46 ----
  // ReservedSpace

  // Dummy constructor
  ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
!     _alignment(0), _special(false), _executable(false), _backing_fd(-1) {
  }

! ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) {
    bool has_preferred_page_size = preferred_page_size != 0;
    // Want to use large pages where possible and pad with small pages.
    size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
    bool large_pages = page_size != (size_t)os::vm_page_size();
    size_t alignment;
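
Note: for context, the new _backing_fd member presumably gets declared in the
ReservedSpace class in virtualspace.hpp. A hypothetical sketch of that
declaration follows; the surrounding members are from the existing class, but
the exact placement and comment for _backing_fd are assumptions:

  class ReservedSpace VALUE_OBJ_CLASS_SPEC {
   protected:
    char*  _base;
    size_t _size;
    size_t _noaccess_prefix;
    size_t _alignment;
    bool   _special;
    int    _backing_fd;  // fd of the file backing this mapping, or -1 if anonymous
   private:
    bool   _executable;
    // ...
  };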
*** 57,79 ****
    initialize(size, alignment, large_pages, NULL, false);
  }

  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              char* requested_address) {
    initialize(size, alignment, large, requested_address, false);
  }

  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              bool executable) {
    initialize(size, alignment, large, NULL, executable);
  }

  // Helper method.
  static bool failed_to_reserve_as_requested(char* base, char* requested_address,
!                                            const size_t size, bool special) {
    if (base == requested_address || requested_address == NULL)
      return false; // did not fail

    if (base != NULL) {
--- 57,79 ----
    initialize(size, alignment, large_pages, NULL, false);
  }

  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              char* requested_address) : _backing_fd(-1) {
    initialize(size, alignment, large, requested_address, false);
  }

  ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large,
!                              bool executable) : _backing_fd(-1) {
    initialize(size, alignment, large, NULL, executable);
  }

  // Helper method.
  static bool failed_to_reserve_as_requested(char* base, char* requested_address,
!                                            const size_t size, bool special, bool is_file_mapped = false) {
    if (base == requested_address || requested_address == NULL)
      return false; // did not fail

    if (base != NULL) {
*** 85,99 ****
--- 85,105 ----
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
+     if (is_file_mapped) {
+       if (!os::unmap_memory(base, size)) {
+         fatal("os::unmap_memory failed");
+       }
+     } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
+   }
    return true;
  }

  void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                                 char* requested_address,
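
Note: the asymmetry above, os::unmap_memory() for file-mapped reservations
versus os::release_memory() for anonymous ones, mirrors the underlying OS
APIs: a file-backed mapping must be torn down with munmap() (UnmapViewOfFile()
on Windows). A minimal standalone POSIX sketch, not HotSpot code:

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main() {
    int fd = open("/tmp/heapfile", O_RDWR | O_CREAT, 0600);
    if (fd == -1 || ftruncate(fd, 4096) == -1) return 1;
    void* p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p != MAP_FAILED) {
      munmap(p, 4096);   // file-backed mappings are released with munmap()
    }
    close(fd);
    unlink("/tmp/heapfile");
    return 0;
  }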
*** 118,128 ****
--- 124,144 ----
      return;
    }

    // If OS doesn't support demand paging for large page memory, we need
    // to use reserve_memory_special() to reserve and pin the entire region.
+   // If there is a backing file directory for this VirtualSpace then whether
+   // large pages are allocated is up to the filesystem the dir resides in.
+   // So we ignore the UseLargePages flag in this case.
    bool special = large && !os::can_commit_large_page_memory();
+   if (special && _backing_fd != -1) {
+     special = false;
+     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+       log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option.");
+     }
+   }
    char* base = NULL;

    if (special) {

      base = os::reserve_memory_special(size, alignment, requested_address, executable);
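
Note: the rationale for ignoring UseLargePages here is that the page size of a
file-backed mapping is dictated by the filesystem holding the file. A
standalone Linux sketch of that OS behavior (an assumption about mmap(), not
HotSpot code): MAP_HUGETLB is refused for a file on a regular filesystem, and
only files living on hugetlbfs get huge pages.

  #define _GNU_SOURCE            // for MAP_HUGETLB on Linux
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main() {
    const size_t len = 2 * 1024 * 1024;   // one 2M huge page worth
    int fd = open("/tmp/backing", O_RDWR | O_CREAT, 0600);
    if (fd == -1 || ftruncate(fd, (off_t)len) == -1) return 1;
    // Rejected (EINVAL) unless the file lives on hugetlbfs:
    void* p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_HUGETLB, fd, 0);
    printf("huge-page file mapping %s\n", p == MAP_FAILED ? "refused" : "granted");
    if (p != MAP_FAILED) munmap(p, len);
    close(fd);
    unlink("/tmp/backing");
    return 0;
  }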
*** 155,185 ****
    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.
    if (requested_address != 0) {
!     base = os::attempt_reserve_memory_at(size, requested_address);
!     if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
!     base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
!     base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
!         failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
--- 171,206 ----
    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.
    if (requested_address != 0) {
!     base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
!     if (failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
!     base = os::reserve_memory(size, NULL, alignment, _backing_fd);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
+     if (_backing_fd != -1) {
+       // os::unmap_memory() does the extra cleanup needed, especially on Windows
+       if (!os::unmap_memory(base, size)) fatal("os::unmap_memory failed");
+     } else {
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+     }
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
!     base = os::reserve_memory_aligned(size, alignment, _backing_fd);

      if (requested_address != 0 &&
!         failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
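
Note: the extra _backing_fd argument presumably flows down to the platform
layer, where it switches the mapping from anonymous to file-backed. A
hypothetical POSIX sketch of that switch; the real os::reserve_memory() in the
os_* files differs in details such as NMT bookkeeping and error handling:

  #define _GNU_SOURCE
  #include <stddef.h>
  #include <sys/mman.h>

  static char* reserve_memory_sketch(size_t bytes, int backing_fd) {
    if (backing_fd != -1) {
      // File-backed: map the heap file shared and immediately read/write,
      // which is also why the caller can treat the space as committed.
      char* addr = (char*)mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                               MAP_SHARED, backing_fd, 0);
      return (addr == MAP_FAILED) ? NULL : addr;
    }
    // Anonymous: reserve address space only; committing happens later.
    char* addr = (char*)mmap(NULL, bytes, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    return (addr == MAP_FAILED) ? NULL : addr;
  }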
*** 188,197 ****
--- 209,222 ----
    }

    // Done
    _base = base;
    _size = size;
    _alignment = alignment;
+   // If the heap is backed by a file, the entire space has already been
+   // committed, so mark the reservation as special.
+   if (_backing_fd != -1) {
+     _special = true;
+   }
  }

  ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                               bool special, bool executable) {
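
Note: marking the space _special reflects that a MAP_SHARED mapping of a file
already sized to the heap is usable immediately, with no separate commit step.
A standalone POSIX sketch of that behavior, not HotSpot code:

  #include <fcntl.h>
  #include <stddef.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main() {
    const size_t len = 1024 * 1024;
    int fd = open("/tmp/commit_demo", O_RDWR | O_CREAT, 0600);
    if (fd == -1 || ftruncate(fd, (off_t)len) == -1) return 1;
    char* p = (char*)mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) return 1;
    p[0] = 1;          // first and last pages are writable right away,
    p[len - 1] = 1;    // with no explicit commit step in between
    munmap(p, len);
    close(fd);
    unlink("/tmp/commit_demo");
    return 0;
  }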
*** 250,260 ****
--- 275,290 ----
  void ReservedSpace::release() {
    if (is_reserved()) {
      char *real_base = _base - _noaccess_prefix;
      const size_t real_size = _size + _noaccess_prefix;
      if (special()) {
+       if (_backing_fd != -1) {
+         os::unmap_memory(real_base, real_size);
+       } else {
        os::release_memory_special(real_base, real_size);
+       }
      } else{
        os::release_memory(real_base, real_size);
      }
      _base = NULL;
      _size = 0;
*** 311,321 ****
--- 341,361 ----
      release();
    }

    // If OS doesn't support demand paging for large page memory, we need
    // to use reserve_memory_special() to reserve and pin the entire region.
+   // If there is a backing file directory for this VirtualSpace then whether
+   // large pages are allocated is up to the filesystem the dir resides in.
+   // So we ignore the UseLargePages flag in this case.
    bool special = large && !os::can_commit_large_page_memory();
+   if (special && _backing_fd != -1) {
+     special = false;
+     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+       log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option.");
+     }
+   }
    char* base = NULL;

    log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                               " heap of size " SIZE_FORMAT_HEX,
                               p2i(requested_address),
*** 348,368 ****
      // If the memory was requested at a particular address, use
      // os::attempt_reserve_memory_at() to avoid over mapping something
      // important.  If available space is not detected, return NULL.
      if (requested_address != 0) {
!       base = os::attempt_reserve_memory_at(size, requested_address);
      } else {
!       base = os::reserve_memory(size, NULL, alignment);
      }
    }
    if (base == NULL) { return; }

    // Done
    _base = base;
    _size = size;
    _alignment = alignment;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      release();
--- 388,412 ----
      // If the memory was requested at a particular address, use
      // os::attempt_reserve_memory_at() to avoid over mapping something
      // important.  If available space is not detected, return NULL.
      if (requested_address != 0) {
!       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
      } else {
!       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
      }
    }
    if (base == NULL) { return; }

    // Done
    _base = base;
    _size = size;
    _alignment = alignment;
+   // If the heap is backed by a file, the entire space has already been
+   // committed, so mark the reservation as special.
+   if (_backing_fd != -1) {
+     _special = true;
+   }

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry.
      release();
*** 554,569 ****
        initialize(size + noaccess_prefix, alignment, large, NULL, false);
      }
    }
  }

! ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
    if (size == 0) {
      return;
    }

    // Heap size should be aligned to alignment, too.
    guarantee(is_size_aligned(size, alignment), "set by caller");

    if (UseCompressedOops) {
      initialize_compressed_heap(size, alignment, large);
--- 598,621 ----
        initialize(size + noaccess_prefix, alignment, large, NULL, false);
      }
    }
  }

! ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backing_fs_for_heap) : ReservedSpace() {
    if (size == 0) {
      return;
    }

+   if (backing_fs_for_heap != NULL) {
+     _backing_fd = os::create_file_for_heap(backing_fs_for_heap, size);
+     if (_backing_fd == -1) {
+       vm_exit_during_initialization(
+         err_msg("Could not create file for Heap at location %s", backing_fs_for_heap));
+     }
+   }
+
    // Heap size should be aligned to alignment, too.
    guarantee(is_size_aligned(size, alignment), "set by caller");

    if (UseCompressedOops) {
      initialize_compressed_heap(size, alignment, large);
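
Note: os::create_file_for_heap() is implemented in the platform os_* files. A
hypothetical POSIX sketch of what it might do: create a uniquely named file in
the given directory, unlink it so it vanishes on last close, and size it to
the heap. The "jvmheap.XXXXXX" name pattern is an assumption:

  #include <fcntl.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <unistd.h>

  static int create_file_for_heap_sketch(const char* dir, size_t size) {
    char path[4096];
    snprintf(path, sizeof(path), "%s/jvmheap.XXXXXX", dir);
    int fd = mkstemp(path);                  // create a uniquely named file
    if (fd == -1) return -1;
    unlink(path);                            // vanish on last close; fd stays usable
    if (ftruncate(fd, (off_t)size) == -1) {  // size the file to the heap size
      close(fd);
      return -1;
    }
    return fd;
  }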
*** 583,592 ****
--- 635,648 ----
           "area must be distinguishable from marks for mark-sweep");

    if (base() > 0) {
      MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
    }
+
+   if (backing_fs_for_heap != NULL) {
+     os::close(_backing_fd);
+   }
  }

  // Reserve space for code segment.  Same as Java heap only we mark this as
  // executable.
  ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
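
Note: closing the descriptor here is safe because POSIX guarantees that an
established mapping survives close(); only munmap() removes it. A minimal
standalone demo of that guarantee, not HotSpot code:

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main() {
    int fd = open("/tmp/close_demo", O_RDWR | O_CREAT, 0600);
    if (fd == -1 || ftruncate(fd, 4096) == -1) return 1;
    char* p = (char*)mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) return 1;
    close(fd);                      // the mapping remains valid after this
    unlink("/tmp/close_demo");
    strcpy(p, "still mapped");      // safe: pages stay mapped until munmap()
    printf("%s\n", p);
    munmap(p, 4096);
    return 0;
  }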