< prev index next >

src/hotspot/os/linux/os_linux.cpp

Print this page




 112   #include <sched.h>
 113   #undef _GNU_SOURCE
 114 #else
 115   #include <sched.h>
 116 #endif
 117 
 118 // if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 119 // getrusage() is prepared to handle the associated failure.
 120 #ifndef RUSAGE_THREAD
 121   #define RUSAGE_THREAD   (1)               /* only the calling thread */
 122 #endif
 123 
 124 #define MAX_PATH    (2 * K)
 125 
 126 #define MAX_SECS 100000000
 127 
 128 // for timer info max values which include all bits
 129 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 130 
 131 #define LARGEPAGES_BIT (1 << 6)

 132 ////////////////////////////////////////////////////////////////////////////////
 133 // global variables
 134 julong os::Linux::_physical_memory = 0;
 135 
 136 address   os::Linux::_initial_thread_stack_bottom = NULL;
 137 uintptr_t os::Linux::_initial_thread_stack_size   = 0;
 138 
 139 int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
 140 int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
 141 int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
 142 Mutex* os::Linux::_createThread_lock = NULL;
 143 pthread_t os::Linux::_main_thread;
 144 int os::Linux::_page_size = -1;
 145 bool os::Linux::_supports_fast_thread_cpu_time = false;
 146 uint32_t os::Linux::_os_version = 0;
 147 const char * os::Linux::_glibc_version = NULL;
 148 const char * os::Linux::_libpthread_version = NULL;
 149 
 150 static jlong initial_time_count=0;
 151 


3247 
3248   if (warn && !result) {
3249     warning("HugeTLBFS is not supported by the operating system.");
3250   }
3251 
3252   return result;
3253 }
3254 
3255 // Set the coredump_filter bits to include largepages in core dump (bit 6)
3256 //
3257 // From the coredump_filter documentation:
3258 //
3259 // - (bit 0) anonymous private memory
3260 // - (bit 1) anonymous shared memory
3261 // - (bit 2) file-backed private memory
3262 // - (bit 3) file-backed shared memory
3263 // - (bit 4) ELF header pages in file-backed private memory areas (it is
3264 //           effective only if the bit 2 is cleared)
3265 // - (bit 5) hugetlb private memory
3266 // - (bit 6) hugetlb shared memory


3267 //
3268 static void set_coredump_filter(void) {
     // Enable LARGEPAGES_BIT (hugetlb shared memory, bit 6) in this
     // process's /proc/self/coredump_filter so large-page regions are
     // included in core dumps. Best effort: all failures are silent.
3269   FILE *f;
3270   long cdm;
     // NOTE(review): "%lx" formally expects an unsigned long*; cdm is
     // declared long. Benign on Linux in practice, but worth confirming.

3271 
3272   if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
       // Filter file absent or not writable - keep the kernel default.
3273     return;
3274   }
3275 
3276   if (fscanf(f, "%lx", &cdm) != 1) {
       // Unparseable contents - leave the filter untouched.
3277     fclose(f);
3278     return;
3279   }
3280 
     // Reposition to the start of the file so the updated mask
     // overwrites the value we just read.
3281   rewind(f);
3282 
3283   if ((cdm & LARGEPAGES_BIT) == 0) {
3284     cdm |= LARGEPAGES_BIT;







     // Write back only when the bit was actually missing, avoiding a
     // redundant write to /proc.
3285     fprintf(f, "%#lx", cdm);
3286   }
3287 
3288   fclose(f);
3289 }
3290 
3291 // Large page support
3292 
3293 static size_t _large_page_size = 0;
3294 
3295 size_t os::Linux::find_large_page_size() {
3296   size_t large_page_size = 0;
3297 
3298   // large_page_size on Linux is used to round up heap size. x86 uses either
3299   // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
3300   // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
3301   // page as large as 256M.
3302   //
3303   // Here we try to figure out page size by parsing /proc/meminfo and looking
3304   // for a line with the following format:


3403   if (!UseLargePages &&
3404       !UseTransparentHugePages &&
3405       !UseHugeTLBFS &&
3406       !UseSHM) {
3407     // Not using large pages.
3408     return;
3409   }
3410 
3411   if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3412     // The user explicitly turned off large pages.
3413     // Ignore the rest of the large pages flags.
3414     UseTransparentHugePages = false;
3415     UseHugeTLBFS = false;
3416     UseSHM = false;
3417     return;
3418   }
3419 
3420   size_t large_page_size = Linux::setup_large_page_size();
3421   UseLargePages          = Linux::setup_large_page_type(large_page_size);
3422 
3423   set_coredump_filter();
3424 }
3425 
3426 #ifndef SHM_HUGETLB
3427   #define SHM_HUGETLB 04000
3428 #endif
3429 
3430 #define shm_warning_format(format, ...)              \
3431   do {                                               \
3432     if (UseLargePages &&                             \
3433         (!FLAG_IS_DEFAULT(UseLargePages) ||          \
3434          !FLAG_IS_DEFAULT(UseSHM) ||                 \
3435          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
3436       warning(format, __VA_ARGS__);                  \
3437     }                                                \
3438   } while (0)
3439 
3440 #define shm_warning(str) shm_warning_format("%s", str)
3441 
3442 #define shm_warning_with_errno(str)                \
3443   do {                                             \


3778 
3779 // With SysV SHM the entire memory region must be allocated as shared
3780 // memory.
3781 // HugeTLBFS allows application to commit large page memory on demand.
3782 // However, when committing memory with HugeTLBFS fails, the region
3783 // that was supposed to be committed will lose the old reservation
3784 // and allow other threads to steal that memory region. Because of this
3785 // behavior we can't commit HugeTLBFS memory.
3786 bool os::can_commit_large_page_memory() {
       // Only transparent huge pages support commit-on-demand; per the
       // comment above, HugeTLBFS loses the reservation if a commit fails.
3787   return UseTransparentHugePages;
3788 }
3789 
3790 bool os::can_execute_large_page_memory() {
       // Executable mappings are supported for both THP and HugeTLBFS
       // large pages (SysV SHM large pages are not reported here).
3791   return UseTransparentHugePages || UseHugeTLBFS;
3792 }
3793 
3794 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
       // Reserve 'bytes' at 'requested_addr', then back the range with the
       // file identified by 'file_desc' (presumably the AllocateHeapAt
       // path - confirm with caller). Returns NULL if the address range
       // was unavailable; exits the VM if the file remapping fails.
3795   assert(file_desc >= 0, "file_desc is not valid");
3796   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
3797   if (result != NULL) {
       // Replace the anonymous reservation with a file-backed mapping;
       // a NULL result here is unrecoverable during initialization.
3798     if (replace_existing_mapping_with_dax_file_mapping(result, bytes, file_desc) == NULL) {
3799       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3800     }
3801   }
3802   return result;
3803 }
3804 
3805 // Reserve memory at an arbitrary address, only if that area is
3806 // available (and not reserved for something else).
3807 
3808 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3809   const int max_tries = 10;
3810   char* base[max_tries];
3811   size_t size[max_tries];
3812   const size_t gap = 0x000000;
3813 
3814   // Assert only that the size is a multiple of the page size, since
3815   // that's all that mmap requires, and since that's all we really know
3816   // about at this low abstraction level.  If we need higher alignment,
3817   // we can either pass an alignment to this method or verify alignment
3818   // in one of the methods further up the call chain.  See bug 5044738.


4912   // call to exit(3C). There can be only 32 of these functions registered
4913   // and atexit() does not set errno.
4914 
4915   if (PerfAllowAtExitRegistration) {
4916     // only register atexit functions if PerfAllowAtExitRegistration is set.
4917     // atexit functions can be delayed until process exit time, which
4918     // can be problematic for embedded VM situations. Embedded VMs should
4919     // call DestroyJavaVM() to assure that VM resources are released.
4920 
4921     // note: perfMemory_exit_helper atexit function may be removed in
4922     // the future if the appropriate cleanup code can be added to the
4923     // VM_Exit VMOperation's doit method.
4924     if (atexit(perfMemory_exit_helper) != 0) {
4925       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4926     }
4927   }
4928 
4929   // initialize thread priority policy
4930   prio_init();
4931 



4932   return JNI_OK;
4933 }
4934 
4935 // Mark the polling page as unreadable
4936 void os::make_polling_page_unreadable(void) {
       // guard_memory() is expected to revoke access to the polling page
       // so the next poll traps; failure leaves safepoints broken, hence fatal.
4937   if (!guard_memory((char*)_polling_page, Linux::page_size())) {
4938     fatal("Could not disable polling page");
4939   }
4940 }
4941 
4942 // Mark the polling page as readable
4943 void os::make_polling_page_readable(void) {
       // Restore PROT_READ on the polling page so polls fall through again;
       // failure leaves safepoints broken, hence fatal.
4944   if (!linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
4945     fatal("Could not enable polling page");
4946   }
4947 }
4948 
4949 // older glibc versions don't have this macro (which expands to
4950 // an optimized bit-counting function) so we have to roll our own
4951 #ifndef CPU_COUNT




 112   #include <sched.h>
 113   #undef _GNU_SOURCE
 114 #else
 115   #include <sched.h>
 116 #endif
 117 
 118 // if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 119 // getrusage() is prepared to handle the associated failure.
 120 #ifndef RUSAGE_THREAD
 121   #define RUSAGE_THREAD   (1)               /* only the calling thread */
 122 #endif
 123 
 124 #define MAX_PATH    (2 * K)
 125 
 126 #define MAX_SECS 100000000
 127 
 128 // for timer info max values which include all bits
 129 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 130 
 131 #define LARGEPAGES_BIT (1 << 6)
 132 #define DAX_SHARED_BIT (1 << 8)
 133 ////////////////////////////////////////////////////////////////////////////////
 134 // global variables
 135 julong os::Linux::_physical_memory = 0;
 136 
 137 address   os::Linux::_initial_thread_stack_bottom = NULL;
 138 uintptr_t os::Linux::_initial_thread_stack_size   = 0;
 139 
 140 int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
 141 int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
 142 int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
 143 Mutex* os::Linux::_createThread_lock = NULL;
 144 pthread_t os::Linux::_main_thread;
 145 int os::Linux::_page_size = -1;
 146 bool os::Linux::_supports_fast_thread_cpu_time = false;
 147 uint32_t os::Linux::_os_version = 0;
 148 const char * os::Linux::_glibc_version = NULL;
 149 const char * os::Linux::_libpthread_version = NULL;
 150 
 151 static jlong initial_time_count=0;
 152 


3248 
3249   if (warn && !result) {
3250     warning("HugeTLBFS is not supported by the operating system.");
3251   }
3252 
3253   return result;
3254 }
3255 
3256 // Set the coredump_filter bits to include largepages in core dump (bit 6)
3257 //
3258 // From the coredump_filter documentation:
3259 //
3260 // - (bit 0) anonymous private memory
3261 // - (bit 1) anonymous shared memory
3262 // - (bit 2) file-backed private memory
3263 // - (bit 3) file-backed shared memory
3264 // - (bit 4) ELF header pages in file-backed private memory areas (it is
3265 //           effective only if the bit 2 is cleared)
3266 // - (bit 5) hugetlb private memory
3267 // - (bit 6) hugetlb shared memory
3268 // - (bit 7) dax private memory
3269 // - (bit 8) dax shared memory
3270 //
3271 static void set_coredump_filter(bool largepages, bool dax_shared) {
     // Enable the requested bits in /proc/self/coredump_filter:
     //   largepages - LARGEPAGES_BIT (hugetlb shared memory, bit 6)
     //   dax_shared - DAX_SHARED_BIT (dax shared memory, bit 8)
     // Best effort: all failures are silent and leave the filter unchanged.
3272   FILE *f;
3273   long cdm;
     // NOTE(review): "%lx" formally expects an unsigned long*; cdm is
     // declared long. Benign on Linux in practice, but worth confirming.
3274   bool filter_changed = false;
3275 
3276   if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
       // Filter file absent or not writable - keep the kernel default.
3277     return;
3278   }
3279 
3280   if (fscanf(f, "%lx", &cdm) != 1) {
       // Unparseable contents - leave the filter untouched.
3281     fclose(f);
3282     return;
3283   }
3284 
     // Reposition to the start of the file so the updated mask
     // overwrites the value we just read.
3285   rewind(f);
3286 
3287   if (largepages && (cdm & LARGEPAGES_BIT) == 0) {
3288     cdm |= LARGEPAGES_BIT;
3289     filter_changed = true;
3290   }
3291   if (dax_shared && (cdm & DAX_SHARED_BIT) == 0) {
3292     cdm |= DAX_SHARED_BIT;
3293     filter_changed = true;
3294   }
     // Write back only when some bit was actually missing, avoiding a
     // redundant write to /proc.
3295   if (filter_changed) {
3296     fprintf(f, "%#lx", cdm);
3297   }
3298 
3299   fclose(f);
3300 }
3301 
3302 // Large page support
3303 
3304 static size_t _large_page_size = 0;
3305 
3306 size_t os::Linux::find_large_page_size() {
3307   size_t large_page_size = 0;
3308 
3309   // large_page_size on Linux is used to round up heap size. x86 uses either
3310   // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
3311   // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
3312   // page as large as 256M.
3313   //
3314   // Here we try to figure out page size by parsing /proc/meminfo and looking
3315   // for a line with the following format:


3414   if (!UseLargePages &&
3415       !UseTransparentHugePages &&
3416       !UseHugeTLBFS &&
3417       !UseSHM) {
3418     // Not using large pages.
3419     return;
3420   }
3421 
3422   if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3423     // The user explicitly turned off large pages.
3424     // Ignore the rest of the large pages flags.
3425     UseTransparentHugePages = false;
3426     UseHugeTLBFS = false;
3427     UseSHM = false;
3428     return;
3429   }
3430 
3431   size_t large_page_size = Linux::setup_large_page_size();
3432   UseLargePages          = Linux::setup_large_page_type(large_page_size);
3433 
3434   set_coredump_filter(true /*largepages*/, false /*dax_shared*/);
3435 }
3436 
3437 #ifndef SHM_HUGETLB
3438   #define SHM_HUGETLB 04000
3439 #endif
3440 
3441 #define shm_warning_format(format, ...)              \
3442   do {                                               \
3443     if (UseLargePages &&                             \
3444         (!FLAG_IS_DEFAULT(UseLargePages) ||          \
3445          !FLAG_IS_DEFAULT(UseSHM) ||                 \
3446          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
3447       warning(format, __VA_ARGS__);                  \
3448     }                                                \
3449   } while (0)
3450 
3451 #define shm_warning(str) shm_warning_format("%s", str)
3452 
3453 #define shm_warning_with_errno(str)                \
3454   do {                                             \


3789 
3790 // With SysV SHM the entire memory region must be allocated as shared
3791 // memory.
3792 // HugeTLBFS allows application to commit large page memory on demand.
3793 // However, when committing memory with HugeTLBFS fails, the region
3794 // that was supposed to be committed will lose the old reservation
3795 // and allow other threads to steal that memory region. Because of this
3796 // behavior we can't commit HugeTLBFS memory.
3797 bool os::can_commit_large_page_memory() {
       // Only transparent huge pages support commit-on-demand; per the
       // comment above, HugeTLBFS loses the reservation if a commit fails.
3798   return UseTransparentHugePages;
3799 }
3800 
3801 bool os::can_execute_large_page_memory() {
       // Executable mappings are supported for both THP and HugeTLBFS
       // large pages (SysV SHM large pages are not reported here).
3802   return UseTransparentHugePages || UseHugeTLBFS;
3803 }
3804 
3805 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
       // Reserve 'bytes' at 'requested_addr', then back the range with the
       // file identified by 'file_desc' (the AllocateHeapAt path - see
       // os::init_2 below). Returns NULL if the address range was
       // unavailable; exits the VM if the file remapping fails.
3806   assert(file_desc >= 0, "file_desc is not valid");
3807   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
3808   if (result != NULL) {
       // Replace the anonymous reservation with a file-backed mapping;
       // a NULL result here is unrecoverable during initialization.
3809     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
3810       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3811     }
3812   }
3813   return result;
3814 }
3815 
3816 // Reserve memory at an arbitrary address, only if that area is
3817 // available (and not reserved for something else).
3818 
3819 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3820   const int max_tries = 10;
3821   char* base[max_tries];
3822   size_t size[max_tries];
3823   const size_t gap = 0x000000;
3824 
3825   // Assert only that the size is a multiple of the page size, since
3826   // that's all that mmap requires, and since that's all we really know
3827   // about at this low abstraction level.  If we need higher alignment,
3828   // we can either pass an alignment to this method or verify alignment
3829   // in one of the methods further up the call chain.  See bug 5044738.


4923   // call to exit(3C). There can be only 32 of these functions registered
4924   // and atexit() does not set errno.
4925 
4926   if (PerfAllowAtExitRegistration) {
4927     // only register atexit functions if PerfAllowAtExitRegistration is set.
4928     // atexit functions can be delayed until process exit time, which
4929     // can be problematic for embedded VM situations. Embedded VMs should
4930     // call DestroyJavaVM() to assure that VM resources are released.
4931 
4932     // note: perfMemory_exit_helper atexit function may be removed in
4933     // the future if the appropriate cleanup code can be added to the
4934     // VM_Exit VMOperation's doit method.
4935     if (atexit(perfMemory_exit_helper) != 0) {
4936       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4937     }
4938   }
4939 
4940   // initialize thread priority policy
4941   prio_init();
4942 
4943   if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
4944     set_coredump_filter(false /*largepages*/, true /*dax_shared*/); 
4945   }
4946   return JNI_OK;
4947 }
4948 
4949 // Mark the polling page as unreadable
4950 void os::make_polling_page_unreadable(void) {
       // guard_memory() is expected to revoke access to the polling page
       // so the next poll traps; failure leaves safepoints broken, hence fatal.
4951   if (!guard_memory((char*)_polling_page, Linux::page_size())) {
4952     fatal("Could not disable polling page");
4953   }
4954 }
4955 
4956 // Mark the polling page as readable
4957 void os::make_polling_page_readable(void) {
       // Restore PROT_READ on the polling page so polls fall through again;
       // failure leaves safepoints broken, hence fatal.
4958   if (!linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
4959     fatal("Could not enable polling page");
4960   }
4961 }
4962 
4963 // older glibc versions don't have this macro (which expands to
4964 // an optimized bit-counting function) so we have to roll our own
4965 #ifndef CPU_COUNT


< prev index next >