
share/memory/virtualspace.cpp

rev 1: G1GC+POGC+NVDIMM Patch with the latest review comments from all reviewers incorporated.

@@ -34,14 +34,16 @@
 
 // ReservedSpace
 
 // Dummy constructor
 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
-    _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
+    _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0),
+    _alignment(0), _special(false), _executable(false), _fd_for_heap(-1), _fd_for_nvdimm(-1) {
 }
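For orientation: the new members used throughout this file are presumably declared in the matching virtualspace.hpp change (not shown on this page). A minimal sketch of what they track, with types inferred from their uses below:

  // Sketch only; the actual declarations live in the virtualspace.hpp part of the patch.
  char*  _nvdimm_base;     // Start of the file-backed (NVDIMM) part of the reservation.
  char*  _nvdimm_base_nv;  // Hint address at which the DRAM part should be placed.
  size_t _nvdimm_size;     // Bytes mapped from the NVDIMM backing file (old gen).
  size_t _dram_size;       // Bytes kept in DRAM (young gen / compressed heap).
  int    _fd_for_nvdimm;   // File descriptor of the NVDIMM backing file, or -1 if unused.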
 
-ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
+ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
+    _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
   bool has_preferred_page_size = preferred_page_size != 0;
   // Want to use large pages where possible and pad with small pages.
   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
   bool large_pages = page_size != (size_t)os::vm_page_size();
   size_t alignment;

@@ -58,17 +60,19 @@
   initialize(size, alignment, large_pages, NULL, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                              bool large,
-                             char* requested_address) : _fd_for_heap(-1) {
+                             char* requested_address) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
+                             _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
   initialize(size, alignment, large, requested_address, false);
 }
 
 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                              bool large,
-                             bool executable) : _fd_for_heap(-1) {
+                             bool executable) : _fd_for_heap(-1), _fd_for_nvdimm(-1), 
+                             _nvdimm_base_nv(NULL), _nvdimm_size(0), _dram_size(0) {
   initialize(size, alignment, large, NULL, executable);
 }
 
 // Helper method
 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {

@@ -141,10 +145,11 @@
       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
     }
   }
 
   char* base = NULL;
+  char* nvdimm_base = NULL;
 
   if (special) {
 
     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 

@@ -182,12 +187,16 @@
       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
         // OS ignored requested address. Try different address.
         base = NULL;
       }
     } else {
+      if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
+        base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment, _fd_for_heap);
+      } else {
       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
     }
+    }
 
     if (base == NULL) return;
 
     // Check alignment constraints
     if ((((size_t)base) & (alignment - 1)) != 0) {

@@ -208,10 +217,13 @@
       }
     }
   }
   // Done
   _base = base;
+  _nvdimm_base = _base - _nvdimm_size;
+  _nvdimm_base_nv = NULL;
+  _dram_size = (size_t)size;
   _size = size;
   _alignment = alignment;
   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
   if (_fd_for_heap != -1) {
     _special = true;

@@ -223,17 +235,19 @@
                              bool special, bool executable) {
   assert((size % os::vm_allocation_granularity()) == 0,
          "size not allocation aligned");
   _base = base;
   _size = size;
+  _nvdimm_base = NULL; 
+  _nvdimm_base_nv = NULL;
+  _dram_size = (size_t)size;
   _alignment = alignment;
   _noaccess_prefix = 0;
   _special = special;
   _executable = executable;
 }
 
-
 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                         bool split, bool realloc) {
   assert(partition_size <= size(), "partition failed");
   if (split) {
     os::split_reserved_memory(base(), size(), partition_size, realloc);

@@ -275,20 +289,28 @@
 
 void ReservedSpace::release() {
   if (is_reserved()) {
     char *real_base = _base - _noaccess_prefix;
     const size_t real_size = _size + _noaccess_prefix;
+    // Unmap the NVDIMM-backed part of the reservation.
+    if (_fd_for_nvdimm != -1) {
+      os::unmap_memory(real_base+real_size, _nvdimm_size);
+    }
     if (special()) {
       if (_fd_for_heap != -1) {
         os::unmap_memory(real_base, real_size);
       } else {
         os::release_memory_special(real_base, real_size);
       }
     } else{
       os::release_memory(real_base, real_size);
     }
     _base = NULL;
+    _nvdimm_base = NULL;
+    _nvdimm_base_nv = NULL;
+    _dram_size = 0;
+    _nvdimm_size = 0;
     _size = 0;
     _noaccess_prefix = 0;
     _alignment = 0;
     _special = false;
     _executable = false;

@@ -339,10 +361,16 @@
   if (_base != NULL) {
     // We tried before, but we didn't like the address delivered.
     release();
   }
 
+  if (_fd_for_nvdimm != -1 && UseG1GC) {
+    char* base_nv = os::reserve_memory(size, requested_address, alignment);
+    initialize_g1gc_nvdimm_dram_sizes(size, alignment);
+    _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM (compressed oops) heap.
+  }
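The hunks further down are easier to read together with this one: the full-size reservation here is only a placeholder used to find a contiguous hole, and _nvdimm_base_nv marks where the DRAM part should land inside it. A sketch of the pattern, using the same os:: primitives (addresses and steps are illustrative):

  // 1. Reserve the whole range once, purely to claim contiguous address space.
  char* base_nv = os::reserve_memory(size, requested_address, alignment);
  // 2. The DRAM portion is meant to start _nvdimm_size bytes into that hole.
  char* dram_hint = base_nv + _nvdimm_size;   // == _nvdimm_base_nv
  // 3. Later in this function the placeholder is unmapped and the DRAM portion
  //    is re-reserved at the hint:
  //      os::unmap_memory(dram_hint, _dram_size);
  //      base = os::attempt_reserve_memory_at(_dram_size, dram_hint);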
+
   // If OS doesn't support demand paging for large page memory, we need
   // to use reserve_memory_special() to reserve and pin the entire region.
   // If there is a backing file directory for this space then whether
   // large pages are allocated is up to the filesystem of the backing file.
   // So we ignore the UseLargePages flag in this case.

@@ -353,10 +381,11 @@
                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
     }
   }
   char* base = NULL;
+  char* nvdimm_base = NULL;
 
   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
                              " heap of size " SIZE_FORMAT_HEX,
                              p2i(requested_address),
                              size);

@@ -388,20 +417,37 @@
     // If the memory was requested at a particular address, use
     // os::attempt_reserve_memory_at() to avoid over mapping something
     // important.  If available space is not detected, return NULL.
 
     if (requested_address != 0) {
+      if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
+        // Unmap the placeholder reservation first so the range can be re-reserved at this address.
+        os::unmap_memory(_nvdimm_base_nv, _dram_size);
+        base = os::attempt_reserve_memory_at(_dram_size, _nvdimm_base_nv);
+      } else {
       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
+      }
+    } else {
+      if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
+        // Unmap the placeholder reservation first so the hinted range is free to be re-reserved.
+        os::unmap_memory(_nvdimm_base_nv, _dram_size);
+        base = os::reserve_memory(_dram_size, _nvdimm_base_nv, alignment);
     } else {
       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
     }
   }
+  }
   if (base == NULL) { return; }
 
   // Done
   _base = base;
+  _nvdimm_base = _base - _nvdimm_size;
+  if (_nvdimm_base_nv != NULL && _fd_for_nvdimm != -1) {
+    _size = _dram_size;
+  } else {
   _size = size;
+  }
   _alignment = alignment;
 
   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
   if (_fd_for_heap != -1) {
     _special = true;

@@ -599,16 +645,45 @@
       initialize(size + noaccess_prefix, alignment, large, NULL, false);
     }
   }
 }
 
+void ReservedHeapSpace::initialize_g1gc_nvdimm_dram_sizes(size_t size, size_t alignment) {
+  _dram_size = (size_t)((size * G1MaxNewSizePercent) / 100);
+  size_t page_sz = os::vm_page_size() - 1;
+  _dram_size = (_dram_size + page_sz) & (~page_sz);
+  // align sizes.
+  _dram_size = align_down(_dram_size, alignment);
+  _nvdimm_size = size - _dram_size;
+  _nvdimm_size = (_nvdimm_size + page_sz) & (~page_sz);
+  _nvdimm_size = align_down(_nvdimm_size, alignment);
+}
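A worked example of the split this helper computes, assuming -Xmx10g, the default G1MaxNewSizePercent of 60, a 4 KB page size and a 2 MB heap alignment (values chosen purely for illustration):

  //   _dram_size   = 10 GB * 60 / 100 = 6 GB   -> already page- and 2 MB-aligned
  //   _nvdimm_size = 10 GB - 6 GB     = 4 GB   -> already page- and 2 MB-aligned
  // i.e. the maximum-young-gen share of the heap stays in DRAM and the remainder
  // is mapped on the NVDIMM backing file.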
+
 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 
   if (size == 0) {
     return;
   }
 
+  // If AllocateOldGenAt is set, back the old generation with a file on the given (NVDIMM) path.
+  if (AllocateOldGenAt != NULL) {
+    _fd_for_nvdimm = os::create_file_for_heap(AllocateOldGenAt);
+    if (_fd_for_nvdimm == -1) {
+      vm_exit_during_initialization(
+        err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
+    }
+    if (UseParallelOldGC) {
+      // For ParallelOldGC, adaptive sizing picks the _old_gen virtual space sizes as needed,
+      // so allocate the full Xmx on NVDIMM up front; adaptive resizing may otherwise put a lot of pressure on NVDIMM.
+      os::allocate_file(_fd_for_nvdimm, MaxHeapSize);
+      os::set_nvdimm_fd(_fd_for_nvdimm);
+      os::set_nvdimm_present(true);
+    }
+  } else {
+    _fd_for_nvdimm = -1;
+  }
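For context (an assumption about usage, not something shown in this file): the new code paths are driven by the AllocateOldGenAt option, presumably along the lines of

  java -XX:+UnlockExperimentalVMOptions -XX:AllocateOldGenAt=/mnt/pmem0 -XX:+UseG1GC -Xmx10g ...

where /mnt/pmem0 stands for a hypothetical DAX-mounted file system on the NVDIMM device.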
+
   if (heap_allocation_directory != NULL) {
     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
     if (_fd_for_heap == -1) {
       vm_exit_during_initialization(
         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));

@@ -616,29 +691,54 @@
   }
 
   // Heap size should be aligned to alignment, too.
   guarantee(is_aligned(size, alignment), "set by caller");
 
+  char* base_nv = NULL;
+  _nvdimm_base_nv = NULL;
+  
+  if (_fd_for_nvdimm != -1 && UseG1GC) {
+    if (!UseCompressedOops) {
+      // if compressed oops use requested address.
+      initialize_g1gc_nvdimm_dram_sizes(size, alignment);
+      base_nv = os::reserve_memory(size, NULL, alignment);
+      _nvdimm_base_nv = base_nv + _nvdimm_size; // Hint for the allocation address of the DRAM heap.
+    }
+  }
+
   if (UseCompressedOops) {
     initialize_compressed_heap(size, alignment, large);
     if (_size > size) {
       // We allocated heap with noaccess prefix.
       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
       // if we had to try at arbitrary address.
       establish_noaccess_prefix();
     }
   } else {
+    if (_fd_for_nvdimm != -1 && UseG1GC) {
+      initialize(_dram_size, alignment, large, NULL, false);
+    } else {
     initialize(size, alignment, large, NULL, false);
   }
+  }
 
   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
          "area must be distinguishable from marks for mark-sweep");
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
          "area must be distinguishable from marks for mark-sweep");
 
   if (base() != NULL) {
     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
+    if (_fd_for_nvdimm != -1 && UseG1GC) {
+      os::set_nvdimm_present(true);
+      os::set_dram_heapbase((address)_base);
+      os::set_nvdimm_heapbase((address)_nvdimm_base);
+      os::set_nvdimm_fd(_fd_for_nvdimm);
+      _size += _nvdimm_size;
+      _base = _nvdimm_base;
+      log_info(gc, heap)("Java DRAM Heap at [%p - %p] & NVDIMM Old Gen at [%p - %p] " SIZE_FORMAT, _nvdimm_base+_nvdimm_size, (char*)(_nvdimm_base+_nvdimm_size+_dram_size), _nvdimm_base, (char*)(_nvdimm_base+_nvdimm_size), size);
+    }
   }
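As a reading aid (not part of the patch), the layout the G1 branch above appears to assemble, assuming the DRAM reservation landed at the hinted address:

  //   _base == _nvdimm_base
  //   [_nvdimm_base, _nvdimm_base + _nvdimm_size)                               NVDIMM old gen (file-backed)
  //   [_nvdimm_base + _nvdimm_size, _nvdimm_base + _nvdimm_size + _dram_size)   DRAM heap
  //   _size is bumped to _nvdimm_size + _dram_size, so callers see one contiguous reservation.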
 
   if (_fd_for_heap != -1) {
     os::close(_fd_for_heap);
   }