--- old/src/os/aix/vm/os_aix.cpp 2017-07-20 17:44:46.056829900 -0700 +++ new/src/os/aix/vm/os_aix.cpp 2017-07-20 17:44:45.651932200 -0700 @@ -2511,7 +2511,7 @@ // Reserve memory at an arbitrary address, only if that area is // available (and not reserved for something else). -char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { +char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, bool use_SHM) { char* addr = NULL; // Always round to os::vm_page_size(), which may be larger than 4K. @@ -2522,7 +2522,7 @@ if (os::vm_page_size() == 4*K) { return reserve_mmaped_memory(bytes, requested_addr, 0); } else { - if (bytes >= Use64KPagesThreshold) { + if (use_SHM && bytes >= Use64KPagesThreshold) { return reserve_shmated_memory(bytes, requested_addr, 0); } else { return reserve_mmaped_memory(bytes, requested_addr, 0); --- old/src/os/posix/vm/os_posix.cpp 2017-07-20 17:44:48.956237900 -0700 +++ new/src/os/posix/vm/os_posix.cpp 2017-07-20 17:44:48.578844900 -0700 @@ -38,6 +38,7 @@ #include #include #include +#include // Todo: provide a os::get_max_process_id() or similar. Number of processes // may have been configured, can be read more accurately from proc fs etc. @@ -139,10 +140,164 @@ return; } +// Helper function to create a temp file in the given directory. +int os::create_file_for_heap(const char* dir, size_t size) { + + const char name_template[] = "/jvmheap.XXXXXX"; + + char *fullname = (char*)::malloc(strlen(dir) + sizeof(name_template)); + if (fullname == NULL) { + vm_exit_during_initialization(err_msg("malloc failed")); + return -1; + } + (void)strcpy(fullname, dir); + (void)strcat(fullname, name_template); + + sigset_t set, oldset; + int ret = sigfillset(&set); + assert(ret == 0, "sigfillset error"); + + // block all signals while we do the file operation. + ret = pthread_sigmask(SIG_BLOCK, &set, &oldset); + assert(ret == 0, "pthread_sigmask error"); + + // set the file creation mask. 
+ mode_t file_mode = S_IRUSR | S_IWUSR; + + // create a new file. + int fd = mkstemp(fullname); + + if (fd < 0) { + // reset the signal mask. + ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL); + assert(ret == 0, "pthread_sigmask error"); + ::free(fullname); + return -1; + } + + // change file permissions; mkstemp creates file with permissions 0600 (glibc versions after 2.06) or 0666 (2.06 and earlier versions) + ret = fchmod(fd, file_mode); + assert(ret == 0, "fchmod error"); + + // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted. + ret = unlink(fullname); + assert(ret == 0, "unlink error"); + + // reset the signal mask. + ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL); + assert(ret == 0, "pthread_sigmask error"); + + ::free(fullname); + return fd; +} + +static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) { + char * addr; + int flags; + + flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS; + if (requested_addr != NULL) { + assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address"); + flags |= MAP_FIXED; + } + + // Map reserved/uncommitted pages PROT_NONE so we fail early if we + // touch an uncommitted page. Otherwise, the read/write might + // succeed if we have enough swap space to back the physical page. 
+ addr = (char*)::mmap(requested_addr, bytes, PROT_NONE, + flags, -1, 0); + + if (addr != MAP_FAILED) { + MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC); + return addr; + } + return NULL; +} + +static int util_posix_fallocate(int fd, off_t offset, off_t len) { +#ifdef __APPLE__ + fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len }; + // First we try to get a continous chunk of disk space + int ret = fcntl(fd, F_PREALLOCATE, &store); + if (ret == -1) { + // Maybe we are too fragmented, try to allocate non-continuous range + store.fst_flags = F_ALLOCATEALL; + ret = fcntl(fd, F_PREALLOCATE, &store); + if (ret == -1) + return -1; + } + return ftruncate(fd, len); +#else + return posix_fallocate(fd, offset, len); +#endif +} + +// Map the given address range to the provided file descriptor. +char* os::map_memory_to_dax_file(char* base, size_t size, int fd) { + assert(fd != -1, "File descriptor is not valid"); + + // allocate space for the file + if (util_posix_fallocate(fd, 0, (off_t)size) != 0) { + vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory (%s)", os::strerror(errno))); + return NULL; + } + + int prot = PROT_READ | PROT_WRITE; + int flags = MAP_SHARED; + if (base != NULL) { + flags |= MAP_FIXED; + } + char* addr = (char*)mmap(base, size, prot, flags, fd, 0); + + if (addr == MAP_FAILED || (base != NULL && addr != base)) { + if (addr != MAP_FAILED) { + if (!os::release_memory(addr, size)) { + warning("Could not release memory on unsuccessful file mapping"); + } + } + return NULL; + } + + return addr; +} + +char* os::replace_existing_mapping_with_dax_file_mapping(char* base, size_t size, int fd) { + assert(fd != -1, "File descriptor is not valid"); + assert(base != NULL, "base cannot be NULL"); + + return map_memory_to_dax_file(base, size, fd); + +} + +char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) { + + // We would want to use the complex logic in 
pd_attempt_reserve_memory_at(), especially in Linux. + // So we call pd_attempt_reserve_memory_at() to purely reserve memory + // and then replace the anonymous mapping with file mapping. + // Unfortunately for AIX, we need to pass a new bool parameter to pd_attempt_reserve_memory_at() + // to indicate not to use SHM + #if defined(AIX) + char* result = pd_attempt_reserve_memory_at(bytes, addr, file_desc == -1 /*can use SHM*/); + #else + char* result = pd_attempt_reserve_memory_at(bytes, addr); + #endif + if (result != NULL && file_desc != -1) { + if (replace_existing_mapping_with_dax_file_mapping(result, bytes, file_desc) == NULL) { + vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); + } + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC); + return result; + } + if (result != NULL) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + } + return result; +} + // Multiple threads can race in this code, and can remap over each other with MAP_FIXED, // so on posix, unmap the section at the start and at the end of the chunk that we mapped // rather than unmapping and remapping the whole chunk to get requested alignment. 
-char* os::reserve_memory_aligned(size_t size, size_t alignment) { +char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) { assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, "Alignment must be a multiple of allocation granularity (page size)"); assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); @@ -150,7 +305,21 @@ size_t extra_size = size + alignment; assert(extra_size >= size, "overflow, size is too large to allow alignment"); - char* extra_base = os::reserve_memory(extra_size, NULL, alignment); + char* extra_base; + if (file_desc != -1) { + // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because + // we need to deal with shrinking of the file space later when we release extra memory after alignment. + // We also cannot call os::reserve_memory() with file_desc set to -1 because on AIX we might get SHM memory. + // So here we call a helper function to reserve memory for us. After we have an aligned base, + // we will replace the anonymous mapping with a file mapping. 
+ extra_base = reserve_mmaped_memory(extra_size, NULL); + if (extra_base != NULL) { + MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC); + } + } + else { + extra_base = os::reserve_memory(extra_size, NULL, alignment); + } if (extra_base == NULL) { return NULL; } @@ -177,6 +346,13 @@ os::release_memory(extra_base + begin_offset + size, end_offset); } + if (file_desc != -1) { + // After we have an aligned address, we can replace anonymous mapping with file mapping + if (replace_existing_mapping_with_dax_file_mapping(aligned_base, size, file_desc) == NULL) { + vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); + } + MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC); + } return aligned_base; } --- old/src/os/windows/vm/os_windows.cpp 2017-07-20 17:44:51.984854600 -0700 +++ new/src/os/windows/vm/os_windows.cpp 2017-07-20 17:44:51.570317400 -0700 @@ -3113,6 +3113,65 @@ UseLargePages = success; } +// Helper function to create a temp file in the given directory +int os::create_file_for_heap(const char* dir, size_t size) { + + const char name_template[] = "/jvmheap.XXXXXX"; + + char *fullname = (char*)_alloca(strlen(dir) + sizeof(name_template)); + (void)strcpy(fullname, dir); + (void)strcat(fullname, name_template); + os::native_path(fullname); + + char *path = _mktemp(fullname); + if (path == NULL) { + return -1; + } + + int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD); + + if (fd < 0) { + warning("Failure to create file %s for heap", path); + return -1; + } + + return fd; +} + +// if 'base' is not NULL, function will return NULL if it cannot get 'base' +// +char* os::map_memory_to_dax_file(char* base, size_t size, int fd) { + assert(fd != -1, "File descriptor is not valid"); + + HANDLE fh = (HANDLE)_get_osfhandle(fd); + HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE, + (DWORD)(size >> 32), (DWORD)(size & 
0xFFFFFFFF), NULL); + if (fileMapping == NULL) { + if (GetLastError() == ERROR_DISK_FULL) { + vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for heap")); + } else { + vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); + } + + return NULL; + } + + LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base); + + CloseHandle(fileMapping); + + return (char*)addr; +} + +char* os::replace_existing_mapping_with_dax_file_mapping(char* base, size_t size, int fd) { + assert(fd != -1, "File descriptor is not valid"); + assert(base != NULL, "base cannot be NULL"); + + release_memory(base, size); + return map_memory_to_dax_file(base, size, fd); + +} + // On win32, one cannot release just a part of reserved memory, it's an // all or nothing deal. When we split a reservation, we must break the // reservation into two reservations. @@ -3129,10 +3188,26 @@ } } +char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) { + char* result = NULL; + if (file_desc != -1) { + result = map_memory_to_dax_file(addr, bytes, file_desc); + if (result != NULL) { + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC); + } + } else { + result = pd_attempt_reserve_memory_at(bytes, addr); + if (result != NULL) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + } + } + return result; +} + // Multiple threads can race in this code but it's not possible to unmap small sections of // virtual space to get requested alignment, like posix-like os's. // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
-char* os::reserve_memory_aligned(size_t size, size_t alignment) { +char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) { assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, "Alignment must be a multiple of allocation granularity (page size)"); assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); @@ -3143,16 +3218,21 @@ char* aligned_base = NULL; do { - char* extra_base = os::reserve_memory(extra_size, NULL, alignment); + char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc); if (extra_base == NULL) { return NULL; } // Do manual alignment aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment); - os::release_memory(extra_base, extra_size); + if (file_desc != -1) { + os::unmap_memory(extra_base, extra_size); + } + else { + os::release_memory(extra_base, extra_size); + } - aligned_base = os::reserve_memory(size, aligned_base); + aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc); } while (aligned_base == NULL); @@ -3195,7 +3275,7 @@ char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { // Windows os::reserve_memory() fails of the requested address range is // not avilable. - return reserve_memory(bytes, requested_addr); + return reserve_memory(bytes, requested_addr, 0); } size_t os::large_page_size() { @@ -4908,7 +4988,6 @@ return base; } - // Remap a block of memory. char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, --- old/src/share/vm/memory/universe.cpp 2017-07-20 17:44:54.835190200 -0700 +++ new/src/share/vm/memory/universe.cpp 2017-07-20 17:44:54.456871700 -0700 @@ -845,7 +845,7 @@ || use_large_pages, "Wrong alignment to use large pages"); // Now create the space. 
- ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages); + ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, HeapDir); if (total_rs.is_reserved()) { assert((total_reserved == total_rs.size()) && ((uintptr_t)total_rs.base() % alignment == 0), --- old/src/share/vm/memory/virtualspace.cpp 2017-07-20 17:44:57.554139700 -0700 +++ new/src/share/vm/memory/virtualspace.cpp 2017-07-20 17:44:57.184239500 -0700 @@ -35,10 +35,10 @@ // Dummy constructor ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0), - _alignment(0), _special(false), _executable(false) { + _alignment(0), _special(false), _executable(false), _backing_fd(-1) { } -ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) { +ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) { bool has_preferred_page_size = preferred_page_size != 0; // Want to use large pages where possible and pad with small pages. size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1); @@ -59,19 +59,19 @@ ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large, - char* requested_address) { + char* requested_address) : _backing_fd(-1) { initialize(size, alignment, large, requested_address, false); } ReservedSpace::ReservedSpace(size_t size, size_t alignment, bool large, - bool executable) { + bool executable) : _backing_fd(-1) { initialize(size, alignment, large, NULL, executable); } // Helper method. 
static bool failed_to_reserve_as_requested(char* base, char* requested_address, - const size_t size, bool special) + const size_t size, bool special, bool is_file_mapped= false) { if (base == requested_address || requested_address == NULL) return false; // did not fail @@ -87,8 +87,14 @@ fatal("os::release_memory_special failed"); } } else { - if (!os::release_memory(base, size)) { - fatal("os::release_memory failed"); + if (is_file_mapped) { + if (!os::unmap_memory(base, size)) { + fatal("os::release_memory failed"); + } + } else { + if (!os::release_memory(base, size)) { + fatal("os::release_memory failed"); + } } } } @@ -120,7 +126,17 @@ // If OS doesn't support demand paging for large page memory, we need // to use reserve_memory_special() to reserve and pin the entire region. + // If there is a backing file directory for this VirtualSpace then whether + // large pages are allocated is up to the filesystem the dir resides in. + // So we ignore the UseLargePages flag in this case. bool special = large && !os::can_commit_large_page_memory(); + if (special && _backing_fd != -1) { + special = false; + if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || + !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { + log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option."); + } + } char* base = NULL; if (special) { @@ -157,13 +173,13 @@ // important. If available space is not detected, return NULL. if (requested_address != 0) { - base = os::attempt_reserve_memory_at(size, requested_address); - if (failed_to_reserve_as_requested(base, requested_address, size, false)) { + base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd); + if (failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) { // OS ignored requested address. Try different address. 
base = NULL; } } else { - base = os::reserve_memory(size, NULL, alignment); + base = os::reserve_memory(size, NULL, alignment, _backing_fd); } if (base == NULL) return; @@ -171,13 +187,18 @@ // Check alignment constraints if ((((size_t)base) & (alignment - 1)) != 0) { // Base not aligned, retry - if (!os::release_memory(base, size)) fatal("os::release_memory failed"); + if (_backing_fd != -1) { + // unmap_memory will do extra work esp. in Windows + if (!os::unmap_memory(base, size)) fatal("os::release_memory failed"); + } else { + if (!os::release_memory(base, size)) fatal("os::release_memory failed"); + } // Make sure that size is aligned size = align_size_up(size, alignment); - base = os::reserve_memory_aligned(size, alignment); + base = os::reserve_memory_aligned(size, alignment, _backing_fd); if (requested_address != 0 && - failed_to_reserve_as_requested(base, requested_address, size, false)) { + failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) { // As a result of the alignment constraints, the allocated base differs // from the requested address. Return back to the caller who can // take remedial action (like try again without a requested address). @@ -190,6 +211,10 @@ _base = base; _size = size; _alignment = alignment; + // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true + if (_backing_fd != -1) { + _special = true; + } } @@ -252,7 +277,12 @@ char *real_base = _base - _noaccess_prefix; const size_t real_size = _size + _noaccess_prefix; if (special()) { - os::release_memory_special(real_base, real_size); + if (_backing_fd != -1) { + os::unmap_memory(real_base, real_size); + } + else { + os::release_memory_special(real_base, real_size); + } } else{ os::release_memory(real_base, real_size); } @@ -313,7 +343,17 @@ // If OS doesn't support demand paging for large page memory, we need // to use reserve_memory_special() to reserve and pin the entire region. 
+ // If there is a backing file directory for this VirtualSpace then whether + // large pages are allocated is up to the filesystem the dir resides in. + // So we ignore the UseLargePages flag in this case. bool special = large && !os::can_commit_large_page_memory(); + if (special && _backing_fd != -1) { + special = false; + if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || + !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { + log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option."); + } + } char* base = NULL; log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT @@ -350,9 +390,9 @@ // important. If available space is not detected, return NULL. if (requested_address != 0) { - base = os::attempt_reserve_memory_at(size, requested_address); + base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd); } else { - base = os::reserve_memory(size, NULL, alignment); + base = os::reserve_memory(size, NULL, alignment, _backing_fd); } } if (base == NULL) { return; } @@ -361,6 +401,10 @@ _base = base; _size = size; _alignment = alignment; + // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true + if (_backing_fd != -1) { + _special = true; + } // Check alignment constraints if ((((size_t)base) & (alignment - 1)) != 0) { @@ -556,12 +600,20 @@ } } -ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() { +ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backing_fs_for_heap) : ReservedSpace() { if (size == 0) { return; } + if (backing_fs_for_heap != NULL) { + _backing_fd = os::create_file_for_heap(backing_fs_for_heap, size); + if (_backing_fd == -1) { + vm_exit_during_initialization( + err_msg("Could not create file for Heap at location %s", backing_fs_for_heap)); + } + } + // Heap size should be aligned to alignment, too. 
guarantee(is_size_aligned(size, alignment), "set by caller"); @@ -585,6 +637,10 @@ if (base() > 0) { MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); } + + if (backing_fs_for_heap != NULL) { + os::close(_backing_fd); + } } // Reserve space for code segment. Same as Java heap only we mark this as --- old/src/share/vm/memory/virtualspace.hpp 2017-07-20 17:45:00.226206900 -0700 +++ new/src/share/vm/memory/virtualspace.hpp 2017-07-20 17:44:59.829911200 -0700 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,6 +37,7 @@ size_t _noaccess_prefix; size_t _alignment; bool _special; + int _backing_fd; private: bool _executable; @@ -111,7 +112,7 @@ void establish_noaccess_prefix(); public: // Constructor. Tries to find a heap that is good for compressed oops. - ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large); + ReservedHeapSpace(size_t size, size_t forced_base_alignment, bool large, const char* backingFSforHeap = NULL); // Returns the base to be used for compression, i.e. so that null can be // encoded safely and implicit null checks can work. 
char *compressed_oop_base() { return _base - _noaccess_prefix; } --- old/src/share/vm/runtime/arguments.cpp 2017-07-20 17:45:03.067930600 -0700 +++ new/src/share/vm/runtime/arguments.cpp 2017-07-20 17:45:02.682583500 -0700 @@ -3647,6 +3647,14 @@ return JNI_ERR; } + if (!FLAG_IS_DEFAULT(HeapDir)) { + if (!FLAG_IS_DEFAULT(UseNUMAInterleaving) || !FLAG_IS_DEFAULT(UseNUMA)) { + log_warning(arguments) ("NUMA support depends on the file system when HeapDir option is used.\n"); + } + FLAG_SET_CMDLINE(bool, UseNUMAInterleaving, false); + FLAG_SET_CMDLINE(bool, UseNUMA, false); + } + return JNI_OK; } --- old/src/share/vm/runtime/globals.hpp 2017-07-20 17:45:05.842297000 -0700 +++ new/src/share/vm/runtime/globals.hpp 2017-07-20 17:45:05.465175300 -0700 @@ -4044,7 +4044,11 @@ diagnostic(bool, CompilerDirectivesPrint, false, \ "Print compiler directives on installation.") \ diagnostic(int, CompilerDirectivesLimit, 50, \ - "Limit on number of compiler directives.") + "Limit on number of compiler directives.") \ + \ + product(ccstr, HeapDir, NULL, \ + "Path to the directoy where a temporary file will be created \ + to use as the backing store for Java Heap.") /* --- old/src/share/vm/runtime/os.cpp 2017-07-20 17:45:08.892412600 -0700 +++ new/src/share/vm/runtime/os.cpp 2017-07-20 17:45:08.516573900 -0700 @@ -1628,10 +1628,23 @@ return os::pd_create_stack_guard_pages(addr, bytes); } -char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { - char* result = pd_reserve_memory(bytes, addr, alignment_hint); - if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); +char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) { + + char* result = NULL; + + if (file_desc != -1) { + // Do your own memory mapping instead of doing + // pd_reserve_memory() followed by replace_existing_mapping_with_dax_file_mapping(), because AIX might use SHM + result = os::map_memory_to_dax_file(addr, bytes, 
file_desc); + if (result != NULL) { + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC); + } + } + else { + result = pd_reserve_memory(bytes, addr, alignment_hint); + if (result != NULL) { + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); + } } return result; @@ -1648,14 +1661,6 @@ return result; } -char* os::attempt_reserve_memory_at(size_t bytes, char* addr) { - char* result = pd_attempt_reserve_memory_at(bytes, addr); - if (result != NULL) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); - } - return result; -} - void os::split_reserved_memory(char *base, size_t size, size_t split, bool realloc) { pd_split_reserved_memory(base, size, split, realloc); --- old/src/share/vm/runtime/os.hpp 2017-07-20 17:45:11.645275300 -0700 +++ new/src/share/vm/runtime/os.hpp 2017-07-20 17:45:11.237418800 -0700 @@ -107,8 +107,12 @@ } static char* pd_reserve_memory(size_t bytes, char* addr = 0, - size_t alignment_hint = 0); + size_t alignment_hint = 0); + #if defined(AIX) + static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr, bool use_SHM = true); + #else static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); + #endif static void pd_split_reserved_memory(char *base, size_t size, size_t split, bool realloc); static bool pd_commit_memory(char* addr, size_t bytes, bool executable); @@ -301,11 +305,12 @@ static int vm_allocation_granularity(); static char* reserve_memory(size_t bytes, char* addr = 0, - size_t alignment_hint = 0); + size_t alignment_hint = 0, int file_desc = -1); static char* reserve_memory(size_t bytes, char* addr, size_t alignment_hint, MEMFLAGS flags); - static char* reserve_memory_aligned(size_t size, size_t alignment); - static char* attempt_reserve_memory_at(size_t bytes, char* addr); + static char* reserve_memory_aligned(size_t size, size_t alignment, int file_desc = -1); + static char* attempt_reserve_memory_at(size_t bytes, char* 
addr, int file_desc = -1); + static void split_reserved_memory(char *base, size_t size, size_t split, bool realloc); static bool commit_memory(char* addr, size_t bytes, bool executable); @@ -337,6 +342,13 @@ static bool pd_create_stack_guard_pages(char* addr, size_t bytes); static bool remove_stack_guard_pages(char* addr, size_t bytes); + // Helper function to create a new file with template jvmheap.XXXXXX + static int create_file_for_heap(const char* dir, size_t size); + // map memory to the dax (direct access) file referred by fd + static char* map_memory_to_dax_file(char* base, size_t size, int fd); + // replace existing reserved memory with file mapping + static char* replace_existing_mapping_with_dax_file_mapping(char* base, size_t size, int fd); + static char* map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only = false, bool allow_exec = false);