< prev index next >
src/share/vm/memory/virtualspace.cpp
Print this page
@@ -119,11 +119,13 @@
return;
}
// If OS doesn't support demand paging for large page memory, we need
// to use reserve_memory_special() to reserve and pin the entire region.
- bool special = large && !os::can_commit_large_page_memory();
+ // If there is a backing file directory for this VirtualSpace then whether large pages are allocated is up to the filesystem the directory resides in.
+ // So we ignore the UseLargePages flag in this case.
+ bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
char* base = NULL;
if (special) {
base = os::reserve_memory_special(size, alignment, requested_address, executable);
@@ -150,18 +152,31 @@
}
}
}
if (base == NULL) {
+
+ // If the '_backingFileDir' variable is not null, the memory should be backed by a file.
+ // The code path taken in the 'if' block below is very similar to the 'else' block, with the only difference being that it uses a file descriptor (fd) for the mmap() calls.
+ // reserve_memory_with_backing_file() encapsulates the different cases which are handled in the 'else' block.
+ if (_backingFileDir != NULL) {
+ base = os::reserve_memory_with_backing_file(size, requested_address, alignment, _backingFileDir);
+
+ if (requested_address != 0 && failed_to_reserve_as_requested(base, requested_address, size, false)) {
+ // OS ignored requested address. Try different address.
+ base = NULL;
+ return;
+ }
+ }
+ else {
// Optimistically assume that the OSes returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.
// If the memory was requested at a particular address, use
// os::attempt_reserve_memory_at() to avoid over mapping something
// important. If available space is not detected, return NULL.
-
if (requested_address != 0) {
base = os::attempt_reserve_memory_at(size, requested_address);
if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
// OS ignored requested address. Try different address.
base = NULL;
@@ -188,10 +203,11 @@
assert(_base == NULL, "should be");
return;
}
}
}
+ }
// Done
_base = base;
_size = size;
_alignment = alignment;
}
@@ -316,11 +332,11 @@
release();
}
// If OS doesn't support demand paging for large page memory, we need
// to use reserve_memory_special() to reserve and pin the entire region.
- bool special = large && !os::can_commit_large_page_memory();
+ bool special = _backingFileDir == NULL && large && !os::can_commit_large_page_memory();
char* base = NULL;
if (PrintCompressedOopsMode && Verbose) {
tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " SIZE_FORMAT_HEX ".\n",
p2i(requested_address), size);
@@ -347,10 +363,24 @@
tty->cr();
tty->print_cr("Reserve regular memory without large pages.");
}
}
+ // If the '_backingFileDir' variable is not null, the memory should be backed by a file.
+ // The code path taken in the 'if' block below is very similar to the 'else' block, with the only difference being that it uses a file descriptor (fd) for the mmap() calls.
+ // reserve_memory_with_backing_file() encapsulates the two cases which are handled in the 'else' block.
+ if (_backingFileDir != NULL) {
+ base = os::reserve_memory_with_backing_file(size, requested_address, alignment, _backingFileDir);
+ if (requested_address != 0 && failed_to_reserve_as_requested(base, requested_address, size, false)) {
+ // OS ignored requested address. Try different address.
+ base = NULL;
+ return;
+ }
+ _special = true;
+ }
+ else {
+
// Optimistically assume that the OSes returns an aligned base pointer.
// When reserving a large address range, most OSes seem to align to at
// least 64K.
// If the memory was requested at a particular address, use
@@ -361,10 +391,11 @@
base = os::attempt_reserve_memory_at(size, requested_address);
} else {
base = os::reserve_memory(size, NULL, alignment);
}
}
+ }
if (base == NULL) { return; }
// Done
_base = base;
_size = size;
@@ -564,16 +595,16 @@
initialize(size + noaccess_prefix, alignment, large, NULL, false);
}
}
}
-ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {
if (size == 0) {
return;
}
-
+ _backingFileDir= backingFSforHeap;
// Heap size should be aligned to alignment, too.
guarantee(is_size_aligned(size, alignment), "set by caller");
if (UseCompressedOops) {
initialize_compressed_heap(size, alignment, large);
< prev index next >