src/hotspot/share/memory/virtualspace.cpp

@@ -1,7 +1,7 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.

@@ -33,14 +33,14 @@
 
 // ReservedSpace
 
 // Dummy constructor
 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
-    _alignment(0), _special(false), _executable(false) {
+    _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
 }
 
-ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
+ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
   bool has_preferred_page_size = preferred_page_size != 0;
   // Want to use large pages where possible and pad with small pages.
   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
   size_t alignment;

@@ -57,23 +57,34 @@
   initialize(size, alignment, large_pages, NULL, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                              bool large,
-                             char* requested_address) {
+                             char* requested_address) : _fd_for_heap(-1) {
   initialize(size, alignment, large, requested_address, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                              bool large,
-                             bool executable) {
+                             bool executable) : _fd_for_heap(-1) {
   initialize(size, alignment, large, NULL, executable);
 }
 
+// Helper method.
+static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
+  if (is_file_mapped) {
+    if (!os::unmap_memory(base, size)) {
+      fatal("os::unmap_memory failed");
+    }
+  } else if (!os::release_memory(base, size)) {
+    fatal("os::release_memory failed");
+  }
+}
+
 // Helper method.
 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
-                                           const size_t size, bool special)
+                                           const size_t size, bool special, bool is_file_mapped = false)
 {
   if (base == requested_address || requested_address == NULL)
     return false; // did not fail
 
   if (base != NULL) {

@@ -85,13 +96,11 @@
     if (special) {
       if (!os::release_memory_special(base, size)) {
         fatal("os::release_memory_special failed");
       }
     } else {
-      if (!os::release_memory(base, size)) {
-        fatal("os::release_memory failed");
-      }
+      unmap_or_release_memory(base, size, is_file_mapped);
     }
   }
   return true;
 }
 

@@ -118,11 +127,22 @@
     return;
   }
 
   // If OS doesn't support demand paging for large page memory, we need
   // to use reserve_memory_special() to reserve and pin the entire region.
+  // If there is a backing file directory for this space, then whether
+  // large pages are allocated is up to the filesystem of the backing file.
+  // So we ignore the UseLargePages flag in this case.
   bool special = large && !os::can_commit_large_page_memory();
+  if (special && _fd_for_heap != -1) {
+    special = false;
+    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+      log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
+    }
+  }
+
   char* base = NULL;
 
   if (special) {
 
     base = os::reserve_memory_special(size, alignment, requested_address, executable);

@@ -155,31 +175,32 @@
     // If the memory was requested at a particular address, use
     // os::attempt_reserve_memory_at() to avoid over mapping something
     // important.  If available space is not detected, return NULL.
 
     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size, requested_address);
-      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
+      base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
+      if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
         // OS ignored requested address. Try different address.
         base = NULL;
       }
     } else {
-      base = os::reserve_memory(size, NULL, alignment);
+      base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
     }
 
     if (base == NULL) return;
 
     // Check alignment constraints
     if ((((size_t)base) & (alignment - 1)) != 0) {
       // Base not aligned, retry
-      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
+      unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
+
       // Make sure that size is aligned
       size = align_up(size, alignment);
-      base = os::reserve_memory_aligned(size, alignment);
+      base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 
       if (requested_address != 0 &&
-          failed_to_reserve_as_requested(base, requested_address, size, false)) {
+          failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
         // As a result of the alignment constraints, the allocated base differs
         // from the requested address. Return back to the caller who can
         // take remedial action (like try again without a requested address).
         assert(_base == NULL, "should be");
         return;

@@ -188,10 +209,14 @@
   }
   // Done
   _base = base;
   _size = size;
   _alignment = alignment;
+  // If the heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true.
+  if (_fd_for_heap != -1) {
+    _special = true;
+  }
 }
 
 
 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                              bool special, bool executable) {

@@ -250,11 +275,15 @@
 void ReservedSpace::release() {
   if (is_reserved()) {
     char *real_base = _base - _noaccess_prefix;
     const size_t real_size = _size + _noaccess_prefix;
     if (special()) {
-      os::release_memory_special(real_base, real_size);
+      if (_fd_for_heap != -1) {
+        os::unmap_memory(real_base, real_size);
+      } else {
+        os::release_memory_special(real_base, real_size);
+      }
     } else{
       os::release_memory(real_base, real_size);
     }
     _base = NULL;
     _size = 0;

@@ -311,11 +340,21 @@
     release();
   }
 
   // If OS doesn't support demand paging for large page memory, we need
   // to use reserve_memory_special() to reserve and pin the entire region.
+  // If there is a backing file directory for this space, then whether
+  // large pages are allocated is up to the filesystem of the backing file.
+  // So we ignore the UseLargePages flag in this case.
   bool special = large && !os::can_commit_large_page_memory();
+  if (special && _fd_for_heap != -1) {
+    special = false;
+    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+      log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
+    }
+  }
   char* base = NULL;
 
   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                              " heap of size " SIZE_FORMAT_HEX,
                              p2i(requested_address),

@@ -348,22 +387,27 @@
     // If the memory was requested at a particular address, use
     // os::attempt_reserve_memory_at() to avoid over mapping something
     // important.  If available space is not detected, return NULL.
 
     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size, requested_address);
+      base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
     } else {
-      base = os::reserve_memory(size, NULL, alignment);
+      base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
     }
   }
   if (base == NULL) { return; }
 
   // Done
   _base = base;
   _size = size;
   _alignment = alignment;
 
+  // If the heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true.
+  if (_fd_for_heap != -1) {
+    _special = true;
+  }
+
   // Check alignment constraints
   if ((((size_t)base) & (alignment - 1)) != 0) {
     // Base not aligned, retry.
     release();
   }

@@ -554,16 +598,24 @@
       initialize(size + noaccess_prefix, alignment, large, NULL, false);
     }
   }
 }
 
-ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
+ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 
   if (size == 0) {
     return;
   }
 
+  if (heap_allocation_directory != NULL) {
+    _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
+    if (_fd_for_heap == -1) {
+      vm_exit_during_initialization(
+        err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
+    }
+  }
+
   // Heap size should be aligned to alignment, too.
   guarantee(is_aligned(size, alignment), "set by caller");
 
   if (UseCompressedOops) {
     initialize_compressed_heap(size, alignment, large);

@@ -583,10 +635,14 @@
          "area must be distinguishable from marks for mark-sweep");
 
   if (base() != NULL) {
     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
   }
+
+  if (_fd_for_heap != -1) {
+    os::close(_fd_for_heap);
+  }
 }
 
 // Reserve space for code segment.  Same as Java heap only we mark this as
 // executable.
 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,