< prev index next >

src/os/linux/vm/os_linux.cpp

Print this page

        

@@ -3382,10 +3382,206 @@
   }
 
   return addr;
 }
 
+// Use 'fd' to reserve memory at the given address.
+// Retry 'max_tries' number of times if the first attempt does not land
+// at the requested address.
+// Return NULL if memory cannot be reserved at the requested address.
+char* os::Linux::reserve_memory_with_backing_file_at(size_t bytes, char* requested_addr, int fd) {
+  const int max_tries = 10;
+  char* base[max_tries];
+  size_t size[max_tries];
+  const size_t gap = 0x000000;
+  int prot = PROT_READ | PROT_WRITE;
+  // Assert only that the size is a multiple of the page size, since
+  // that's all that mmap requires, and since that's all we really know
+  // about at this low abstraction level.  If we need higher alignment,
+  // we can either pass an alignment to this method or verify alignment
+  // in one of the methods further up the call chain.  See bug 5044738.
+  assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
+
+  // Repeatedly allocate blocks until the block is allocated at the
+  // right spot.
+
+  // Linux mmap allows the caller to pass an address as a hint; give it a try
+  // first, and if the kernel honors the hint we can return immediately.
+  // Do NOT use MAP_FIXED here: MAP_FIXED is not a hint -- it silently
+  // replaces any existing mapping at requested_addr, which could clobber
+  // memory already in use by the VM.
+  char* addr = (char*)mmap(requested_addr, bytes, prot, MAP_SHARED, fd, 0);
+  if (addr == MAP_FAILED) {
+    return NULL;
+  }
+  if (addr == requested_addr) {
+    return requested_addr;
+  }
+
+  // mmap() succeeded but did not reserve at the requested address;
+  // give the mapping back and fall through to the retry loop below.
+  // This is a file-backed mapping, so release it with plain munmap
+  // rather than anon_munmap.
+  ::munmap(addr, bytes);
+
+  int i;
+  for (i = 0; i < max_tries; ++i) {
+    base[i] = (char*)mmap(NULL, bytes, prot, MAP_SHARED, fd, 0);
+    // mmap reports failure with MAP_FAILED, not NULL; normalize so that the
+    // checks below and the cleanup loop can safely test against NULL.
+    if (base[i] == MAP_FAILED) {
+      base[i] = NULL;
+    }
+
+    if (base[i] != NULL) {
+      // Is this the block we wanted?
+      if (base[i] == requested_addr) {
+        size[i] = bytes;
+        break;
+      }
+
+      // Does this overlap the block we wanted? Give back the overlapped
+      // parts and try again.
+
+      ptrdiff_t top_overlap = requested_addr + (bytes + gap) - base[i];
+      if (top_overlap >= 0 && (size_t)top_overlap < bytes) {
+        ::munmap(base[i], top_overlap);
+        base[i] += top_overlap;
+        size[i] = bytes - top_overlap;
+      }
+      else {
+        ptrdiff_t bottom_overlap = base[i] + bytes - requested_addr;
+        if (bottom_overlap >= 0 && (size_t)bottom_overlap < bytes) {
+          ::munmap(requested_addr, bottom_overlap);
+          size[i] = bytes - bottom_overlap;
+        }
+        else {
+          size[i] = bytes;
+        }
+      }
+    }
+  }
+
+  // Give back the unused reserved pieces.
+
+  for (int j = 0; j < i; ++j) {
+    if (base[j] != NULL) {
+      ::munmap(base[j], size[j]);
+    }
+  }
+
+  if (i < max_tries) {
+    return requested_addr;
+  }
+  else {
+    return NULL;
+  }
+}
+
+// Use 'fd' to reserve memory with the given alignment.
+// First attempt is made with the optimistic assumption that the OS will
+// return an aligned pointer; for the second attempt we over-reserve by
+// 'alignment' bytes and manually align the base pointer, trimming the
+// unused head and tail of the over-reservation.
+char* os::Linux::reserve_memory_with_backing_file_aligned(size_t size, size_t alignment, int fd) {
+
+  int prot = PROT_READ | PROT_WRITE;
+
+  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
+    "Alignment must be a multiple of allocation granularity (page size)");
+
+  // Round the size up *before* the optimistic fast path, so the fast path
+  // cannot return a mapping smaller than the aligned size the caller expects.
+  size = align_size_up(size, alignment);
+  assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
+
+  // Optimistically assume that the OS returns an aligned base pointer.
+  char* addr = (char*)::mmap(NULL, size, prot, MAP_SHARED, fd, 0);
+
+  if (addr == MAP_FAILED) {
+    return NULL;
+  }
+
+  if ((((size_t)addr) & (alignment - 1)) == 0) {
+    return addr;
+  }
+
+  // Not aligned: give the mapping back and over-reserve so that an aligned
+  // sub-range of 'size' bytes is guaranteed to exist inside it.
+  ::munmap(addr, size);
+
+  size_t extra_size = size + alignment;
+  assert(extra_size >= size, "overflow, size is too large to allow alignment");
+
+  char* extra_base = (char*)::mmap(NULL, extra_size, prot, MAP_SHARED, fd, 0);
+
+  if (extra_base == MAP_FAILED) {
+    return NULL;
+  }
+
+  // Do manual alignment
+  char* aligned_base = (char*)align_size_up((uintptr_t)extra_base, alignment);
+
+  // [  |                                       |  ]
+  // ^ extra_base
+  //    ^ extra_base + begin_offset == aligned_base
+  //     extra_base + begin_offset + size       ^
+  //                       extra_base + extra_size ^
+  // |<>| == begin_offset
+  //                              end_offset == |<>|
+  size_t begin_offset = aligned_base - extra_base;
+  size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
+
+  // Give back the unused pieces before and after the aligned range.
+  if (begin_offset > 0) {
+    ::munmap(extra_base, begin_offset);
+  }
+
+  if (end_offset > 0) {
+    ::munmap(extra_base + begin_offset + size, end_offset);
+  }
+
+  return aligned_base;
+}
+
+// Reserve memory by creating a temporary file in the provided directory and
+// using it for the mmap() call.  If 'requested_addr' is non-NULL the
+// reservation is attempted at that address; otherwise a reservation with the
+// given 'alignment' is made.  The file descriptor is closed before returning:
+// an established mmap() mapping keeps the (already unlinked) backing file
+// alive, so closing the descriptor does not tear down the reservation.
+char* os::Linux::reserve_memory_with_backing_file(size_t bytes, char* requested_addr,
+                                                  size_t alignment, const char* backingFileDir) {
+
+  char* addr = NULL;
+  int fd = create_tmpfile(backingFileDir, bytes, false);
+  if (fd == -1) {
+    // vm_exit_during_initialization does not return; the return below is
+    // for form only.
+    vm_exit_during_initialization(err_msg("Could not create temporary file in %s for object heap", backingFileDir));
+    return NULL;
+  }
+
+  if (requested_addr != NULL) {
+    addr = reserve_memory_with_backing_file_at(bytes, requested_addr, fd);
+  } else {
+    addr = reserve_memory_with_backing_file_aligned(bytes, alignment, fd);
+  }
+
+  (void)close(fd);
+
+  return addr;
+}
+
+// Helper function to create a temporary file in the given directory.
+// The file is unlinked immediately after creation, so it vanishes from the
+// filesystem as soon as the last descriptor/mapping referencing it goes away.
+// Disk space for 'size' bytes is preallocated up front so that later page
+// faults on the mapping cannot fail for lack of disk space.
+// Returns an open file descriptor, or -1 on failure (after printing a warning).
+int os::Linux::create_tmpfile(const char* dir, size_t size, bool exec) {
+
+  static char name_template[] = "/jvmheap.XXXXXX";
+
+  char fullname[strlen(dir) + sizeof(name_template)];
+  (void)strcpy(fullname, dir);
+  (void)strcat(fullname, name_template);
+
+  // Block all signals while the file is visible on disk -- presumably so a
+  // signal handler cannot run (and possibly leave the file behind) between
+  // mkstemp() and unlink().
+  sigset_t set, oldset;
+  sigfillset(&set);
+  (void)sigprocmask(SIG_BLOCK, &set, &oldset);
+
+  mode_t mode = exec ? (S_IRUSR | S_IWUSR | S_IXUSR) : (S_IRUSR | S_IWUSR);
+  int fd = mkstemp(fullname);
+  if (fd < 0) {
+    // Restore the signal mask on this path too; the previous code returned
+    // with all signals still blocked.
+    (void)sigprocmask(SIG_SETMASK, &oldset, NULL);
+    warning("Could not create file for heap");
+    return -1;
+  }
+
+  // mkstemp() creates the file with mode 0600.  Set the desired mode
+  // explicitly with fchmod() instead of fiddling with umask(): umask takes
+  // the permission bits to *clear*, so passing the desired mode to umask()
+  // (as the previous code did) removed those bits, and umask can never add
+  // the exec bit anyway.
+  (void)fchmod(fd, mode);
+
+  (void)unlink(fullname);
+  (void)sigprocmask(SIG_SETMASK, &oldset, NULL);
+
+  // posix_fallocate() returns the error number directly (it does not set
+  // errno).  Publish it in errno for any diagnostics, close the descriptor
+  // (the previous code leaked it on this path), and fail.
+  int err = posix_fallocate(fd, 0, (off_t)size);
+  if (err != 0) {
+    errno = err;
+    (void)close(fd);
+    warning("Could not allocate sufficient disk space for heap");
+    return -1;
+  }
+
+  return fd;
+}
+
 static void warn_on_large_pages_failure(char* req_addr, size_t bytes,
                                         int error) {
   assert(error == ENOMEM, "Only expect to fail if no memory is available");
 
   bool warn_on_failure = UseLargePages &&
< prev index next >