
src/share/vm/memory/virtualspace.cpp


@@ -118,11 +118,13 @@
     return;
   }
 
   // If OS doesn't support demand paging for large page memory, we need
   // to use reserve_memory_special() to reserve and pin the entire region.
-  bool special = large && !os::can_commit_large_page_memory();
+  // If there is a backing file directory for this space, whether large pages are allocated
+  // is up to the filesystem the directory resides in, so the UseLargePages flag is ignored here.
+  bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
   char* base = NULL;
 
   if (special) {
 
     base = os::reserve_memory_special(size, alignment, requested_address, executable);

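The same decision is repeated in the ReservedHeapSpace path further down. Restated on its
own (purely illustrative, with hypothetical names), the new rule is:

    #include <stddef.h>

    static bool use_special_reservation(const char* backing_dir,
                                        bool large_pages_requested,
                                        bool os_can_commit_large_pages) {
      if (backing_dir != NULL) {
        // File-backed reservation: the filesystem decides the page size, so the
        // pinned reserve_memory_special() path is never taken.
        return false;
      }
      return large_pages_requested && !os_can_commit_large_pages;
    }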
@@ -188,10 +190,17 @@
   }
   // Done
   _base = base;
   _size = size;
   _alignment = alignment;
+
+  if (_backingFileDir != NULL) {
+    // At this point a virtual address range is reserved; now map this memory to a file.
+    os::map_memory_to_file(base, size, _backingFileDir);
+    // Mark this virtual space as _special because the physical memory is committed.
+    _special = true;
+  }
 }
 
 
 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                              bool special, bool executable) {

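For context on what the new branch delegates to: os::map_memory_to_file() lives in the
platform os code, not in this file, but on a POSIX system its effect is roughly the
following. This is a minimal sketch assuming Linux-style mmap semantics; the names and
error handling are illustrative, not the actual HotSpot implementation.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Map an already-reserved [base, base + size) range onto a file created in 'dir'.
    static char* map_range_to_file_in_dir(char* base, size_t size, const char* dir) {
      char path[4096];
      snprintf(path, sizeof(path), "%s/jvmheap.XXXXXX", dir);
      int fd = mkstemp(path);                    // backing file on the target filesystem
      if (fd == -1) return NULL;
      unlink(path);                              // keep it unnamed once it is open
      if (ftruncate(fd, (off_t)size) == -1) { close(fd); return NULL; }
      // Replace the reserved anonymous mapping with a file-backed one at the same address.
      char* addr = (char*)mmap(base, size, PROT_READ | PROT_WRITE,
                               MAP_SHARED | MAP_FIXED, fd, 0);
      close(fd);                                 // the mapping holds its own reference
      return (addr == MAP_FAILED) ? NULL : addr;
    }

Because the whole range is backed by the file once the mapping succeeds, treating it as
committed (hence _special = true) mirrors how the pinned large-page path already behaves.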
@@ -311,11 +320,13 @@
     release();
   }
 
   // If OS doesn't support demand paging for large page memory, we need
   // to use reserve_memory_special() to reserve and pin the entire region.
-  bool special = large && !os::can_commit_large_page_memory();
+  // If there is a backing file directory for this space, whether large pages are allocated
+  // is up to the filesystem the directory resides in, so the UseLargePages flag is ignored here.
+  bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
   char* base = NULL;
 
   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                              " heap of size " SIZE_FORMAT_HEX,
                              p2i(requested_address),

@@ -364,10 +375,19 @@
 
   // Check alignment constraints
   if ((((size_t)base) & (alignment - 1)) != 0) {
     // Base not aligned, retry.
     release();
+    return;
+  }
+  if (_backingFileDir != NULL) {
+    // At this point a virtual address range is reserved; now map this memory to a file.
+    if (!os::map_memory_to_file(base, size, _backingFileDir)) {
+      vm_exit_during_initialization(err_msg("Error mapping the object heap at the given filesystem directory %s", _backingFileDir));
+    }
+    // Mark this virtual space as _special because the physical memory is committed.
+    _special = true;
   }
 }
 
 void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                           char *lowest_start,

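The early return added above makes the retry explicit; the check itself is the usual
power-of-two mask test. A standalone restatement, illustrative only:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool is_aligned_ptr(const void* p, size_t alignment) {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0); // power of two
      return (((uintptr_t)p) & (alignment - 1)) == 0;               // low bits must be zero
    }
    // e.g. with a 2M alignment (0x200000): a base of 0x7f0000200000 passes, while
    // 0x7f0000201000 does not, so the reservation is released and retried.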
@@ -554,16 +574,17 @@
       initialize(size + noaccess_prefix, alignment, large, NULL, false);
     }
   }
 }
 
-ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {
 
   if (size == 0) {
     return;
   }
 
+  _backingFileDir = backingFSforHeap;
   // Heap size should be aligned to alignment, too.
   guarantee(is_size_aligned(size, alignment), "set by caller");
 
   if (UseCompressedOops) {
     initialize_compressed_heap(size, alignment, large);
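A caller-side sketch of the new constructor argument (hypothetical wiring, not part of
this diff): the directory would typically come from a command-line option such as
AllocateHeapAt, and passing NULL preserves the existing behaviour.

    #include "memory/virtualspace.hpp"

    // Illustrative only: thread a backing directory down to the heap reservation.
    static ReservedHeapSpace reserve_java_heap(size_t heap_size,
                                               size_t heap_alignment,
                                               bool use_large_pages,
                                               const char* backing_dir) {
      // backing_dir == NULL means a plain anonymous reservation, exactly as before;
      // a non-NULL directory makes the reservation file-backed and marked _special.
      return ReservedHeapSpace(heap_size, heap_alignment, use_large_pages, backing_dir);
    }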