< prev index next >
src/os/windows/vm/os_windows.cpp
Print this page
*** 3111,3120 ****
--- 3111,3181 ----
cleanup_after_large_page_init();
UseLargePages = success;
}
+ // Helper function to create a temp file in the given directory.
+ // On success returns a descriptor for an open, zero-length file that Windows
+ // will delete automatically when the last descriptor to it is closed;
+ // returns -1 on failure.
+ // 'size' is currently unused; it is kept in the signature for interface
+ // compatibility with the os-independent callers.
+ int os::create_file_for_heap(const char* dir, size_t size) {
+
+ const char name_template[] = "/jvmheap.XXXXXX";
+
+ // Build "<dir>/jvmheap.XXXXXX" on the stack; 'dir' comes from the
+ // AllocateHeapAt option and is expected to be a short path.
+ char *fullname = (char*)alloca(strlen(dir) + sizeof(name_template));
+ (void)strcpy(fullname, dir);
+ (void)strcat(fullname, name_template);
+ os::native_path(fullname);
+
+ // Replace the XXXXXX pattern with a unique file name.
+ char *path = _mktemp(fullname);
+ if (path == NULL) {
+ return -1;
+ }
+
+ // Open with O_TEMPORARY so the file (and its disk space) is deleted by the
+ // OS as soon as 'fd' is closed. Calling _unlink() on a file that is still
+ // open fails on Windows, which would leave the heap backing file behind.
+ int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
+
+ if (fd < 0) {
+ warning("Failure to create file %s for heap", path);
+ return -1;
+ }
+
+ return fd;
+ }
+
+ // Map 'size' bytes of the file behind descriptor 'fd' into memory, for use
+ // as the backing store of the Java heap.
+ // If 'base' is not NULL, the view must be placed exactly at 'base'; the
+ // function returns NULL if it cannot get 'base'.
+ // If the file mapping object itself cannot be created (e.g. the disk is
+ // full) the VM is exited during initialization.
+ char* os::map_memory_to_file(char* base, size_t size, int fd) {
+ assert(fd != -1, "File descriptor is not valid");
+
+ // Convert the CRT descriptor to the underlying Win32 HANDLE.
+ HANDLE fh = (HANDLE)_get_osfhandle(fd);
+ // Create a file-backed mapping of 'size' bytes; the 64-bit size is passed
+ // as separate high/low DWORDs.
+ HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
+ (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
+ if (fileMapping == NULL) {
+ if (GetLastError() == ERROR_DISK_FULL) {
+ vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for heap"));
+ } else {
+ vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
+ }
+
+ // Presumably not reached: vm_exit_during_initialization() terminates the
+ // VM. Kept to satisfy the compiler's return-path analysis.
+ return NULL;
+ }
+
+ // Map a writable view; with a non-NULL 'base', MapViewOfFileEx either maps
+ // at exactly that address or fails with NULL.
+ LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
+
+ // Closing the mapping handle is safe here: the mapped view keeps the
+ // underlying section alive until it is unmapped.
+ CloseHandle(fileMapping);
+ if (addr == NULL || (base != NULL && addr != base)) {
+ // 'addr != base' is defensive; on failure to honor 'base' unmap whatever
+ // we did get before reporting failure to the caller.
+ if (addr != NULL) {
+ if(!pd_unmap_memory((char*)addr, size)) {
+ warning("Could not release memory on unsuccessful file mapping");
+ }
+ }
+ return NULL;
+ }
+
+ return (char*)addr;
+ }
+
// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal. When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
bool realloc) {
*** 3130,3176 ****
}
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
! char* os::reserve_memory_aligned(size_t size, size_t alignment) {
assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
"Alignment must be a multiple of allocation granularity (page size)");
assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
size_t extra_size = size + alignment;
assert(extra_size >= size, "overflow, size is too large to allow alignment");
char* aligned_base = NULL;
do {
! char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
if (extra_base == NULL) {
return NULL;
}
// Do manual alignment
aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
os::release_memory(extra_base, extra_size);
! aligned_base = os::reserve_memory(size, aligned_base);
} while (aligned_base == NULL);
return aligned_base;
}
! char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment");
assert(bytes % os::vm_page_size() == 0, "reserve page size");
char* res;
// note that if UseLargePages is on, all the areas that require interleaving
// will go thru reserve_memory_special rather than thru here.
bool use_individual = (UseNUMAInterleaving && !UseLargePages);
if (!use_individual) {
res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
} else {
elapsedTimer reserveTimer;
if (Verbose && PrintMiscellaneous) reserveTimer.start();
// in numa interleaving, we have to allocate pages individually
// (well really chunks of NUMAInterleaveGranularity size)
--- 3191,3247 ----
}
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
! char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
"Alignment must be a multiple of allocation granularity (page size)");
assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
size_t extra_size = size + alignment;
assert(extra_size >= size, "overflow, size is too large to allow alignment");
char* aligned_base = NULL;
do {
! char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
if (extra_base == NULL) {
return NULL;
}
// Do manual alignment
aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+ // Release the over-sized reservation, then re-reserve exactly at
+ // aligned_base below. Another thread can grab the freed range first, in
+ // which case the re-reserve fails and the do/while loop retries.
+ // File-backed memory was mapped via map_memory_to_file and must be
+ // unmapped; anonymous reservations are released instead.
+ if (file_desc != -1) {
+ os::unmap_memory(extra_base, extra_size);
+ }
+ else {
os::release_memory(extra_base, extra_size);
+ }
! aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
} while (aligned_base == NULL);
return aligned_base;
}
! char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment");
assert(bytes % os::vm_page_size() == 0, "reserve page size");
char* res;
// note that if UseLargePages is on, all the areas that require interleaving
// will go thru reserve_memory_special rather than thru here.
bool use_individual = (UseNUMAInterleaving && !UseLargePages);
+ assert(file_desc == -1 || !use_individual, "NUMA allocation not supported when HeapDir is used");
if (!use_individual) {
+ if (file_desc != -1) {
+ res = map_memory_to_file(addr, bytes, file_desc);
+ } else {
res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
+ }
} else {
elapsedTimer reserveTimer;
if (Verbose && PrintMiscellaneous) reserveTimer.start();
// in numa interleaving, we have to allocate pages individually
// (well really chunks of NUMAInterleaveGranularity size)
*** 3190,3203 ****
return res;
}
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
! char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
// Windows os::reserve_memory() fails of the requested address range is
// not avilable.
! return reserve_memory(bytes, requested_addr);
}
size_t os::large_page_size() {
return _large_page_size;
}
--- 3261,3274 ----
return res;
}
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
! char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
// Windows os::reserve_memory() fails if the requested address range is
// not available, which gives the required only-if-available semantics.
! return reserve_memory(bytes, requested_addr, 0, file_desc);
}
size_t os::large_page_size() {
return _large_page_size;
}
*** 4906,4916 ****
}
return base;
}
-
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
bool allow_exec) {
// This OS does not allow existing memory maps to be remapped so we
--- 4977,4986 ----
< prev index next >