src/os/linux/vm/os_linux.cpp

Print this page
rev 2675: merge with latest default trunk


2485     if (rv != (off_t)-1) {
2486       if (::write(fd, "", 1) == 1) {
2487         mmap(base, size,
2488              PROT_READ|PROT_WRITE|PROT_EXEC,
2489              MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
2490       }
2491     }
2492     ::close(fd);
2493     unlink(buf);
2494   }
2495 }
2496 
2497 // NOTE: Linux kernel does not really reserve the pages for us.
2498 //       All it does is to check if there are enough free pages
2499 //       left at the time of mmap(). This could be a potential
2500 //       problem.
// Commit [addr, addr+size): map anonymous read/write(/exec) pages over
// the range with MAP_FIXED, replacing whatever mapping was there.
// Presumably addr comes from an earlier os::reserve_memory() call —
// confirm against callers.  Returns true iff mmap() succeeded.
2501 bool os::commit_memory(char* addr, size_t size, bool exec) {
2502   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2503   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
2504                                    MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);



2505   return res != (uintptr_t) MAP_FAILED;
2506 }
2507 
2508 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
// NOTE(review): 0x40000 matches the common Linux/x86 header value; mmap
// flag constants can be architecture-specific — confirm per port.
2509 #ifndef MAP_HUGETLB
2510 #define MAP_HUGETLB 0x40000
2511 #endif
2512 
2513 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2514 #ifndef MADV_HUGEPAGE
2515 #define MADV_HUGEPAGE 14
2516 #endif
2517 
// Commit with an alignment hint: when HugeTLBFS is enabled and the hint
// exceeds the small-page size, map the range with explicit huge pages
// (MAP_HUGETLB); otherwise fall through to the plain small-page commit.
// NOTE(review): if the MAP_HUGETLB mmap() fails (e.g. the hugetlb pool
// is exhausted) this returns false without retrying with small pages —
// confirm callers handle that, or whether a fallback is intended.
2518 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
2519                        bool exec) {
2520   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2521     int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2522     uintptr_t res =
2523       (uintptr_t) ::mmap(addr, size, prot,
2524                          MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
2525                          -1, 0);



2526     return res != (uintptr_t) MAP_FAILED;
2527   }
2528 
2529   return commit_memory(addr, size, exec);
2530 }
2531 
// Hint the kernel to back [addr, addr+bytes) with huge pages when the
// alignment hint suggests large-page-sized chunks.
// NOTE(review): MADV_HUGEPAGE is a transparent-huge-page (THP) hint,
// yet the guard tests UseHugeTLBFS — confirm this mix is intentional.
2532 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2533   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2534     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2535     // be supported or the memory may already be backed by huge pages.
2536     ::madvise(addr, bytes, MADV_HUGEPAGE);
2537   }
2538 }
2539 
// "Free" [addr, addr+bytes) by re-committing it as fresh anonymous
// pages: the MAP_FIXED remap discards the old page contents and lets
// the kernel reclaim the backing memory, while the range stays mapped
// read/write (exec = false).
2540 void os::free_memory(char *addr, size_t bytes) {
2541   commit_memory(addr, bytes, false);
2542 }
2543 
2544 void os::numa_make_global(char *addr, size_t bytes) {
2545   Linux::numa_interleave_memory(addr, bytes);


3098   }
3099 
3100   // attach to the region
3101   addr = (char*)shmat(shmid, req_addr, 0);
3102   int err = errno;
3103 
3104   // Remove shmid. If shmat() is successful, the actual shared memory segment
3105   // will be deleted when it's detached by shmdt() or when the process
3106   // terminates. If shmat() is not successful this will remove the shared
3107   // segment immediately.
3108   shmctl(shmid, IPC_RMID, NULL);
3109 
3110   if ((intptr_t)addr == -1) {
3111      if (warn_on_failure) {
3112        jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
3113        warning(msg);
3114      }
3115      return NULL;
3116   }
3117 




3118   return addr;
3119 }
3120 
// Detach the SysV shared-memory segment attached by
// reserve_memory_special().  IPC_RMID was already issued right after
// shmat(), so this final detach also destroys the segment.
// Returns true iff shmdt() succeeded.
3121 bool os::release_memory_special(char* base, size_t bytes) {
3122   // detaching the SHM segment will also delete it, see reserve_memory_special()
3123   int rslt = shmdt(base);
3124   return rslt == 0;
3125 }
3126 
// Accessor for the cached large page size.  _large_page_size is
// presumably initialized during VM startup — confirm elsewhere in this
// file.
3127 size_t os::large_page_size() {
3128   return _large_page_size;
3129 }
3130 
3131 // HugeTLBFS allows application to commit large page memory on demand;
3132 // with SysV SHM the entire memory region must be allocated as shared
3133 // memory.
// True iff large pages can be committed piecemeal (HugeTLBFS mode).
3134 bool os::can_commit_large_page_memory() {
3135   return UseHugeTLBFS;
3136 }
3137 




2485     if (rv != (off_t)-1) {
2486       if (::write(fd, "", 1) == 1) {
2487         mmap(base, size,
2488              PROT_READ|PROT_WRITE|PROT_EXEC,
2489              MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
2490       }
2491     }
2492     ::close(fd);
2493     unlink(buf);
2494   }
2495 }
2496 
2497 // NOTE: Linux kernel does not really reserve the pages for us.
2498 //       All it does is to check if there are enough free pages
2499 //       left at the time of mmap(). This could be a potential
2500 //       problem.
// Commit [addr, addr+size) with anonymous read/write(/exec) pages via
// MAP_FIXED, then — new in this revision — interleave the freshly
// committed pages across NUMA nodes when UseNUMAInterleaving is on.
// MAP_FIXED guarantees a successful mapping lands exactly at addr, so
// passing addr (not res) to numa_make_global() is correct.
2501 bool os::commit_memory(char* addr, size_t size, bool exec) {
2502   int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2503   uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
2504                                    MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
2505   if (UseNUMAInterleaving && (res != (uintptr_t) MAP_FAILED)) {
2506     numa_make_global(addr, size);
2507   }
2508   return res != (uintptr_t) MAP_FAILED;
2509 }
2510 
2511 // Define MAP_HUGETLB here so we can build HotSpot on old systems.
// NOTE(review): 0x40000 matches the common Linux/x86 header value; mmap
// flag constants can be architecture-specific — confirm per port.
2512 #ifndef MAP_HUGETLB
2513 #define MAP_HUGETLB 0x40000
2514 #endif
2515 
2516 // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
2517 #ifndef MADV_HUGEPAGE
2518 #define MADV_HUGEPAGE 14
2519 #endif
2520 
// Commit with an alignment hint: when HugeTLBFS is enabled and the hint
// exceeds the small-page size, map with explicit huge pages
// (MAP_HUGETLB) and, on success, NUMA-interleave the pages; otherwise
// delegate to the plain small-page commit.
// NOTE(review): if the MAP_HUGETLB mmap() fails (e.g. the hugetlb pool
// is exhausted) this returns false without retrying with small pages —
// confirm callers handle that, or whether a fallback is intended.
2521 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
2522                        bool exec) {
2523   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2524     int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
2525     uintptr_t res =
2526       (uintptr_t) ::mmap(addr, size, prot,
2527                          MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
2528                          -1, 0);
2529     if (UseNUMAInterleaving && (res != (uintptr_t) MAP_FAILED)) {
2530       numa_make_global(addr, size);
2531     }
2532     return res != (uintptr_t) MAP_FAILED;
2533   }
2534 
2535   return commit_memory(addr, size, exec);
2536 }
2537 
// Hint the kernel to back [addr, addr+bytes) with huge pages when the
// alignment hint suggests large-page-sized chunks.
// NOTE(review): MADV_HUGEPAGE is a transparent-huge-page (THP) hint,
// yet the guard tests UseHugeTLBFS — confirm this mix is intentional.
2538 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2539   if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
2540     // We don't check the return value: madvise(MADV_HUGEPAGE) may not
2541     // be supported or the memory may already be backed by huge pages.
2542     ::madvise(addr, bytes, MADV_HUGEPAGE);
2543   }
2544 }
2545 
// "Free" [addr, addr+bytes) by re-committing it as fresh anonymous
// pages: the MAP_FIXED remap discards the old page contents and lets
// the kernel reclaim the backing memory, while the range stays mapped
// read/write (exec = false).
2546 void os::free_memory(char *addr, size_t bytes) {
2547   commit_memory(addr, bytes, false);
2548 }
2549 
2550 void os::numa_make_global(char *addr, size_t bytes) {
2551   Linux::numa_interleave_memory(addr, bytes);


3104   }
3105 
3106   // attach to the region
3107   addr = (char*)shmat(shmid, req_addr, 0);
3108   int err = errno;
3109 
3110   // Remove shmid. If shmat() is successful, the actual shared memory segment
3111   // will be deleted when it's detached by shmdt() or when the process
3112   // terminates. If shmat() is not successful this will remove the shared
3113   // segment immediately.
3114   shmctl(shmid, IPC_RMID, NULL);
3115 
3116   if ((intptr_t)addr == -1) {
3117      if (warn_on_failure) {
3118        jio_snprintf(msg, sizeof(msg), "Failed to attach shared memory (errno = %d).", err);
3119        warning(msg);
3120      }
3121      return NULL;
3122   }
3123 
3124   if (UseNUMAInterleaving) {
3125     numa_make_global(addr, bytes);
3126   }
3127 
3128   return addr;
3129 }
3130 
// Detach the SysV shared-memory segment attached by
// reserve_memory_special().  IPC_RMID was already issued right after
// shmat(), so this final detach also destroys the segment.
// Returns true iff shmdt() succeeded.
3131 bool os::release_memory_special(char* base, size_t bytes) {
3132   // detaching the SHM segment will also delete it, see reserve_memory_special()
3133   int rslt = shmdt(base);
3134   return rslt == 0;
3135 }
3136 
// Accessor for the cached large page size.  _large_page_size is
// presumably initialized during VM startup — confirm elsewhere in this
// file.
3137 size_t os::large_page_size() {
3138   return _large_page_size;
3139 }
3140 
3141 // HugeTLBFS allows application to commit large page memory on demand;
3142 // with SysV SHM the entire memory region must be allocated as shared
3143 // memory.
// True iff large pages can be committed piecemeal (HugeTLBFS mode).
3144 bool os::can_commit_large_page_memory() {
3145   return UseHugeTLBFS;
3146 }
3147