< prev index next >

src/os/posix/vm/os_posix.cpp

Print this page




  21  * questions.
  22  *
  23  */
  24 
  25 #include "utilities/globalDefinitions.hpp"
  26 #include "prims/jvm.h"
  27 #include "semaphore_posix.hpp"
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/interfaceSupport.hpp"
  30 #include "runtime/os.hpp"
  31 #include "utilities/macros.hpp"
  32 #include "utilities/vmError.hpp"
  33 
  34 #include <signal.h>
  35 #include <unistd.h>
  36 #include <sys/resource.h>
  37 #include <sys/utsname.h>
  38 #include <pthread.h>
  39 #include <semaphore.h>
  40 #include <signal.h>

  41 
  42 // Todo: provide a os::get_max_process_id() or similar. Number of processes
  43 // may have been configured, can be read more accurately from proc fs etc.
  44 #ifndef MAX_PID
  45 #define MAX_PID INT_MAX
  46 #endif
  47 #define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
  48 
  49 // Check core dump limit and report possible place where core can be found
  50 void os::check_dump_limit(char* buffer, size_t bufferSize) {
  51   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
  52     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
  53     VMError::record_coredump_status(buffer, false);
  54     return;
  55   }
  56 
  57   int n;
  58   struct rlimit rlim;
  59   bool success;
  60 


 122 
      // Remove 'name' from the process environment.
      // Returns true iff the underlying POSIX ::unsetenv call succeeded (returned 0).
  123 bool os::unsetenv(const char* name) {
  124   assert(name != NULL, "Null pointer");
  125   return (::unsetenv(name) == 0);
  126 }
 127 
      // Return errno, the error code of the most recent failed OS call on this thread.
  128 int os::get_last_error() {
  129   return errno;
  130 }
 131 
      // Debugger detection is not implemented on POSIX platforms;
      // conservatively report that no debugger is attached.
  132 bool os::is_debugger_attached() {
  133   // not implemented
  134   return false;
  135 }
 136 
      // Hook used on some platforms to keep a console window open at VM exit;
      // intentionally a no-op on POSIX.
  137 void os::wait_for_keypress_at_exit(void) {
  138   // don't do anything on posix platforms
  139   return;
  140 }
 141 


























































































































































 142 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
 143 // so on posix, unmap the section at the start and at the end of the chunk that we mapped
 144 // rather than unmapping and remapping the whole chunk to get requested alignment.
  145 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  146   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
  147       "Alignment must be a multiple of allocation granularity (page size)");
  148   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
  149 
      // Over-reserve by 'alignment' bytes so an aligned start address is
      // guaranteed to exist somewhere inside the reservation.
  150   size_t extra_size = size + alignment;
  151   assert(extra_size >= size, "overflow, size is too large to allow alignment");
  152 
  153   char* extra_base = os::reserve_memory(extra_size, NULL, alignment);














  154 
  155   if (extra_base == NULL) {
  156     return NULL;
  157   }
  158 
  159   // Do manual alignment
  160   char* aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
  161 
  162   // [  |                                       |  ]
  163   // ^ extra_base
  164   //    ^ extra_base + begin_offset == aligned_base
  165   //     extra_base + begin_offset + size       ^
  166   //                       extra_base + extra_size ^
  167   // |<>| == begin_offset
  168   //                              end_offset == |<>|
  169   size_t begin_offset = aligned_base - extra_base;
  170   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
  171 
      // Trim the unaligned head and the unused tail rather than unmapping and
      // remapping the whole chunk, so a concurrent MAP_FIXED mapping by another
      // thread cannot land inside our range (see comment above this function).
  172   if (begin_offset > 0) {
  173       os::release_memory(extra_base, begin_offset);
  174   }
  175 
  176   if (end_offset > 0) {
  177       os::release_memory(extra_base + begin_offset + size, end_offset);
  178   }
  179 







  180   return aligned_base;
  181 }
 182 
      // printf-style formatting into 'buf' for logging; thin wrapper over vsnprintf.
  183 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  184     return vsnprintf(buf, len, fmt, args);
  185 }
 186 
      // Return the file descriptor underlying 'fp'. The '::' global qualifier is
      // dropped on AIX via NOT_AIX — presumably fileno is a macro there; TODO confirm.
  187 int os::get_fileno(FILE* fp) {
  188   return NOT_AIX(::)fileno(fp);
  189 }
 190 
      // Convert 'clock' to broken-down UTC time using the re-entrant
      // (thread-safe) gmtime_r; result is stored in caller-supplied 'res'.
  191 struct tm* os::gmtime_pd(const time_t* clock, struct tm*  res) {
  192   return gmtime_r(clock, res);
  193 }
 194 
 195 void os::Posix::print_load_average(outputStream* st) {
 196   st->print("load average:");
 197   double loadavg[3];
 198   os::loadavg(loadavg, 3);
 199   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);




  21  * questions.
  22  *
  23  */
  24 
  25 #include "utilities/globalDefinitions.hpp"
  26 #include "prims/jvm.h"
  27 #include "semaphore_posix.hpp"
  28 #include "runtime/frame.inline.hpp"
  29 #include "runtime/interfaceSupport.hpp"
  30 #include "runtime/os.hpp"
  31 #include "utilities/macros.hpp"
  32 #include "utilities/vmError.hpp"
  33 
  34 #include <signal.h>
  35 #include <unistd.h>
  36 #include <sys/resource.h>
  37 #include <sys/utsname.h>
  38 #include <pthread.h>
  39 #include <semaphore.h>
  40 #include <signal.h>
  41 #include <sys/mman.h>
  42 
  43 // Todo: provide a os::get_max_process_id() or similar. Number of processes
  44 // may have been configured, can be read more accurately from proc fs etc.
  45 #ifndef MAX_PID
  46 #define MAX_PID INT_MAX
  47 #endif
  48 #define IS_VALID_PID(p) (p > 0 && p < MAX_PID)
  49 
  50 // Check core dump limit and report possible place where core can be found
  51 void os::check_dump_limit(char* buffer, size_t bufferSize) {
  52   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
  53     jio_snprintf(buffer, bufferSize, "CreateCoredumpOnCrash is disabled from command line");
  54     VMError::record_coredump_status(buffer, false);
  55     return;
  56   }
  57 
  58   int n;
  59   struct rlimit rlim;
  60   bool success;
  61 


 123 
      // Remove 'name' from the process environment.
      // Returns true iff the underlying POSIX ::unsetenv call succeeded (returned 0).
  124 bool os::unsetenv(const char* name) {
  125   assert(name != NULL, "Null pointer");
  126   return (::unsetenv(name) == 0);
  127 }
 128 
      // Return errno, the error code of the most recent failed OS call on this thread.
  129 int os::get_last_error() {
  130   return errno;
  131 }
 132 
      // Debugger detection is not implemented on POSIX platforms;
      // conservatively report that no debugger is attached.
  133 bool os::is_debugger_attached() {
  134   // not implemented
  135   return false;
  136 }
 137 
      // Hook used on some platforms to keep a console window open at VM exit;
      // intentionally a no-op on POSIX.
  138 void os::wait_for_keypress_at_exit(void) {
  139   // don't do anything on posix platforms
  140   return;
  141 }
 142 
  143 // Helper function to create a temp file in the given directory.
      // Returns an open descriptor for an already-unlinked temp file (so the disk
      // space is reclaimed automatically when 'fd' is closed), or -1 on failure.
      // NOTE(review): 'size' is not used here — space is allocated by the caller
      // via fallocate/mmap; confirm against callers.
  144 int os::create_file_for_heap(const char* dir, size_t size) {
  145 
  146   const char name_template[] = "/jvmheap.XXXXXX";
  147 
      // sizeof(name_template) includes the trailing NUL, so the buffer is exactly
      // large enough for dir + "/jvmheap.XXXXXX" + '\0'.
  148   char *fullname = (char*)::malloc(strlen(dir) + sizeof(name_template));
  149   if (fullname == NULL) {
  150     vm_exit_during_initialization(err_msg("malloc failed"));
  151     return -1;
  152   }
  153   (void)strcpy(fullname, dir);
  154   (void)strcat(fullname, name_template);
  155 
  156   sigset_t set, oldset;
  157   int ret = sigfillset(&set);
  158   assert(ret == 0, "sigfillset error");
      // NOTE(review): all 'ret' checks below are asserts and compile away in
      // product builds, so fchmod/unlink/sigmask failures are silently ignored
      // there — confirm this is intended.
  159 
  160   // block all signals while we do the file operation.
  161   ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
  162   assert(ret == 0, "pthread_sigmask error");
  163 
  164   // set the file creation mask.
  165   mode_t file_mode = S_IRUSR | S_IWUSR;
  166 
  167   // create a new file.
  168   int fd = mkstemp(fullname);
  169 
  170   if (fd < 0) {
  171     // reset the signal mask.
  172     ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
  173     assert(ret == 0, "pthread_sigmask error");
  174     ::free(fullname);
  175     return -1;
  176   }
  177 
  178   // change file permissions; mkstemp creates file with permissions 0600 (glibc versions after 2.06) or 0666 (2.06 and earlier versions)
  179   ret = fchmod(fd, file_mode);
  180   assert(ret == 0, "fchmod error");
  181 
  182   // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
  183   ret = unlink(fullname);
  184   assert(ret == 0, "unlink error");
  185 
  186   // reset the signal mask.
  187   ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
  188   assert(ret == 0, "pthread_sigmask error");
  189 
  190   ::free(fullname);
  191   return fd;
  192 }
 193 
 194 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
 195   char * addr;
 196   int flags;
 197 
 198   flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
 199   if (requested_addr != NULL) {
 200     assert((uintptr_t)requested_addr % os::Linux::page_size() == 0, "unaligned address");
 201     flags |= MAP_FIXED;
 202   }
 203 
 204   // Map reserved/uncommitted pages PROT_NONE so we fail early if we
 205   // touch an uncommitted page. Otherwise, the read/write might
 206   // succeed if we have enough swap space to back the physical page.
 207   addr = (char*)::mmap(requested_addr, bytes, PROT_NONE,
 208     flags, -1, 0);
 209 
 210   if (addr != MAP_FAILED) {
 211     MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC);
 212     return addr;
 213   }
 214   return NULL;
 215 }
 216 
      // Portable file preallocation: posix_fallocate where available; on macOS,
      // emulate it with fcntl(F_PREALLOCATE) followed by ftruncate.
      // Returns 0 on success, -1 (macOS) or an error number (posix_fallocate) on failure.
  217 static int util_posix_fallocate(int fd, off_t offset, off_t len) {
  218 #ifdef __APPLE__
      // NOTE(review): 'offset' is ignored on this path — allocation uses
      // fst_offset 0 and the file is truncated to 'len'. Confirm that callers
      // only ever pass offset == 0 (the caller in this file does).
  219   fstore_t store = { F_ALLOCATECONTIG, F_PEOFPOSMODE, 0, len };
  220   // First we try to get a contiguous chunk of disk space
  221   int ret = fcntl(fd, F_PREALLOCATE, &store);
  222   if (ret == -1) {
  223     // Maybe we are too fragmented, try to allocate a non-contiguous range
  224     store.fst_flags = F_ALLOCATEALL;
  225     ret = fcntl(fd, F_PREALLOCATE, &store);
  226     if (ret == -1)
  227       return -1;
  228   }
  229   return ftruncate(fd, len);
  230 #else
  231   return posix_fallocate(fd, offset, len);
  232 #endif
  233 }
 234 
  235 // Map the given address range to the provided file descriptor.
      // Preallocates 'size' bytes in the file, then maps it MAP_SHARED.
      // If 'base' is non-NULL the mapping is forced there with MAP_FIXED,
      // replacing any existing mapping in that range. Returns the mapped
      // address, or NULL on failure (preallocation failure exits the VM).
  236 char* os::map_memory_to_dax_file(char* base, size_t size, int fd) {
  237   assert(fd != -1, "File descriptor is not valid");
  238 
  239   // allocate space for the file
  240   if (util_posix_fallocate(fd, 0, (off_t)size) != 0) {
  241     vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory (%s)", os::strerror(errno)));
  242     return NULL;
  243   }
  244 
  245   int prot = PROT_READ | PROT_WRITE;
  246   int flags = MAP_SHARED;
  247   if (base != NULL) {
  248     flags |= MAP_FIXED;
  249   }
  250   char* addr = (char*)mmap(base, size, prot, flags, fd, 0);
  251 
      // Treat "mapped, but not at the requested base" the same as failure,
      // and release the stray mapping before returning.
  252   if (addr == MAP_FAILED || (base != NULL && addr != base)) {
  253     if (addr != MAP_FAILED) {
  254       if (!os::release_memory(addr, size)) {
  255         warning("Could not release memory on unsuccessful file mapping");
  256       }
  257     }
  258     return NULL;
  259   }
  260 
  261   return addr;
  262 }
 263 
      // Replace the existing (anonymous) mapping at [base, base+size) with a
      // MAP_FIXED file-backed mapping of 'fd'; see map_memory_to_dax_file.
  264 char* os::replace_existing_mapping_with_dax_file_mapping(char* base, size_t size, int fd) {
  265   assert(fd != -1, "File descriptor is not valid");
  266   assert(base != NULL, "base cannot be NULL");
  267 
  268   return map_memory_to_dax_file(base, size, fd);
  269 
  270 }
 271 
      // Try to reserve 'bytes' at 'addr'. When file_desc != -1 the reservation
      // is backed by that file (DAX heap) and recorded as reserved+committed.
  272 char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) {
  273 
  274   // We would want to use the complex logic in pd_attempt_reserve_memory_at(), especially in Linux.
  275   // So we call pd_attempt_reserve_memory_at() to purely reserve memory
  276   // and then replace the anonymous mapping with file mapping.
  277   // Unfortunately for AIX, we need to pass a new bool parameter to pd_attempt_reserve_memory_at()
  278   // to indicate not to use SHM
  279   #if defined(AIX)
  280     char* result = pd_attempt_reserve_memory_at(bytes, addr, file_desc == -1 /*can use SHM*/);
  281   #else
  282     char* result = pd_attempt_reserve_memory_at(bytes, addr);
  283   #endif
  284   if (result != NULL && file_desc != -1) {
      // File-backed heap: swap the anonymous reservation for a file mapping;
      // failure to do so is fatal during initialization.
  285     if (replace_existing_mapping_with_dax_file_mapping(result, bytes, file_desc) == NULL) {
  286       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
  287     }
  288     MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
  289     return result;
  290   }
  291   if (result != NULL) {
  292     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  293   }
  294   return result;
  295 }
 296 
  297 // Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
  298 // so on posix, unmap the section at the start and at the end of the chunk that we mapped
  299 // rather than unmapping and remapping the whole chunk to get requested alignment.
  300 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  301   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
  302       "Alignment must be a multiple of allocation granularity (page size)");
  303   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
  304 
      // Over-reserve by 'alignment' bytes so an aligned start address is
      // guaranteed to exist somewhere inside the reservation.
  305   size_t extra_size = size + alignment;
  306   assert(extra_size >= size, "overflow, size is too large to allow alignment");
  307 
  308   char* extra_base;
  309   if (file_desc != -1) {
  310     // For file mapping, we do not call os::reserve_memory(extra_size, NULL, alignment, file_desc) because
  311     // we need to deal with shrinking of the file space later when we release extra memory after alignment.
  312     // We also cannot call os::reserve_memory() with file_desc set to -1 because on aix we might get SHM memory.
  313     // So we call a helper function to reserve the memory for us. After we have an aligned base,
  314     // we will replace the anonymous mapping with a file mapping.
  315     extra_base = reserve_mmaped_memory(extra_size, NULL);
  316     if (extra_base != NULL) {
  317       MemTracker::record_virtual_memory_reserve((address)extra_base, extra_size, CALLER_PC);
  318     }
  319   }
  320   else {
  321     extra_base = os::reserve_memory(extra_size, NULL, alignment);
  322   }
  323 
  324   if (extra_base == NULL) {
  325     return NULL;
  326   }
  327 
  328   // Do manual alignment
  329   char* aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
  330 
  331   // [  |                                       |  ]
  332   // ^ extra_base
  333   //    ^ extra_base + begin_offset == aligned_base
  334   //     extra_base + begin_offset + size       ^
  335   //                       extra_base + extra_size ^
  336   // |<>| == begin_offset
  337   //                              end_offset == |<>|
  338   size_t begin_offset = aligned_base - extra_base;
  339   size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
  340 
      // Trim the unaligned head and unused tail instead of remapping the whole
      // chunk, so a racing MAP_FIXED mapping cannot land inside our range.
  341   if (begin_offset > 0) {
  342       os::release_memory(extra_base, begin_offset);
  343   }
  344 
  345   if (end_offset > 0) {
  346       os::release_memory(extra_base + begin_offset + size, end_offset);
  347   }
  348 
  349   if (file_desc != -1) {
  350     // After we have an aligned address, we can replace the anonymous mapping with a file mapping
  351     if (replace_existing_mapping_with_dax_file_mapping(aligned_base, size, file_desc) == NULL) {
  352       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
  353     }
  354     MemTracker::record_virtual_memory_commit((address)aligned_base, size, CALLER_PC);
  355   }
  356   return aligned_base;
  357 }
 358 
      // printf-style formatting into 'buf' for logging; thin wrapper over vsnprintf.
  359 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  360     return vsnprintf(buf, len, fmt, args);
  361 }
 362 
      // Return the file descriptor underlying 'fp'. The '::' global qualifier is
      // dropped on AIX via NOT_AIX — presumably fileno is a macro there; TODO confirm.
  363 int os::get_fileno(FILE* fp) {
  364   return NOT_AIX(::)fileno(fp);
  365 }
 366 
      // Convert 'clock' to broken-down UTC time using the re-entrant
      // (thread-safe) gmtime_r; result is stored in caller-supplied 'res'.
  367 struct tm* os::gmtime_pd(const time_t* clock, struct tm*  res) {
  368   return gmtime_r(clock, res);
  369 }
 370 
 371 void os::Posix::print_load_average(outputStream* st) {
 372   st->print("load average:");
 373   double loadavg[3];
 374   os::loadavg(loadavg, 3);
 375   st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);


< prev index next >