< prev index next >

src/hotspot/os/linux/os_linux.cpp

Print this page




 111   #include <sched.h>
 112   #undef _GNU_SOURCE
 113 #else
 114   #include <sched.h>
 115 #endif
 116 
 117 // if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 118 // getrusage() is prepared to handle the associated failure.
 119 #ifndef RUSAGE_THREAD
 120   #define RUSAGE_THREAD   (1)               /* only the calling thread */
 121 #endif
 122 
 123 #define MAX_PATH    (2 * K)
 124 
 125 #define MAX_SECS 100000000
 126 
 127 // for timer info max values which include all bits
 128 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 129 
 130 #define LARGEPAGES_BIT (1 << 6)

 131 ////////////////////////////////////////////////////////////////////////////////
 132 // global variables
 133 julong os::Linux::_physical_memory = 0;
 134 
 135 address   os::Linux::_initial_thread_stack_bottom = NULL;
 136 uintptr_t os::Linux::_initial_thread_stack_size   = 0;
 137 
 138 int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
 139 int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
 140 int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
 141 Mutex* os::Linux::_createThread_lock = NULL;
 142 pthread_t os::Linux::_main_thread;
 143 int os::Linux::_page_size = -1;
 144 bool os::Linux::_supports_fast_thread_cpu_time = false;
 145 uint32_t os::Linux::_os_version = 0;
 146 const char * os::Linux::_glibc_version = NULL;
 147 const char * os::Linux::_libpthread_version = NULL;
 148 
 149 static jlong initial_time_count=0;
 150 


3246 
3247   if (warn && !result) {
3248     warning("HugeTLBFS is not supported by the operating system.");
3249   }
3250 
3251   return result;
3252 }
3253 
3254 // Set the coredump_filter bits to include largepages in core dump (bit 6)
3255 //
3256 // From the coredump_filter documentation:
3257 //
3258 // - (bit 0) anonymous private memory
3259 // - (bit 1) anonymous shared memory
3260 // - (bit 2) file-backed private memory
3261 // - (bit 3) file-backed shared memory
3262 // - (bit 4) ELF header pages in file-backed private memory areas (it is
3263 //           effective only if the bit 2 is cleared)
3264 // - (bit 5) hugetlb private memory
3265 // - (bit 6) hugetlb shared memory


3266 //
3267 static void set_coredump_filter(void) {
     // Best-effort: turn on LARGEPAGES_BIT (bit 6, hugetlb shared memory) in
     // /proc/self/coredump_filter so large pages are included in core dumps.
     // Any failure to open or parse the file is silently ignored.
3268   FILE *f;
3269   long cdm;

3270 
     // "r+" lets the same stream read the current mask and rewrite it below.
3271   if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
3272     return;
3273   }
3274 
     // NOTE(review): "%lx" strictly expects unsigned long*; passing &cdm
     // (long*) works on glibc but is UB per the C standard — confirm.
3275   if (fscanf(f, "%lx", &cdm) != 1) {
3276     fclose(f);
3277     return;
3278   }
3279 
     // Reposition to the start so the updated mask overwrites the old value.
3280   rewind(f);
3281 
     // Only write back when the bit is not already set.
3282   if ((cdm & LARGEPAGES_BIT) == 0) {
3283     cdm |= LARGEPAGES_BIT;







3284     fprintf(f, "%#lx", cdm);
3285   }
3286 
3287   fclose(f);
3288 }
3289 
3290 // Large page support
3291 
3292 static size_t _large_page_size = 0;
3293 
3294 size_t os::Linux::find_large_page_size() {
3295   size_t large_page_size = 0;
3296 
3297   // large_page_size on Linux is used to round up heap size. x86 uses either
3298   // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
3299   // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
3300   // page as large as 256M.
3301   //
3302   // Here we try to figure out page size by parsing /proc/meminfo and looking
3303   // for a line with the following format:


3402   if (!UseLargePages &&
3403       !UseTransparentHugePages &&
3404       !UseHugeTLBFS &&
3405       !UseSHM) {
3406     // Not using large pages.
3407     return;
3408   }
3409 
3410   if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3411     // The user explicitly turned off large pages.
3412     // Ignore the rest of the large pages flags.
3413     UseTransparentHugePages = false;
3414     UseHugeTLBFS = false;
3415     UseSHM = false;
3416     return;
3417   }
3418 
3419   size_t large_page_size = Linux::setup_large_page_size();
3420   UseLargePages          = Linux::setup_large_page_type(large_page_size);
3421 
3422   set_coredump_filter();
3423 }
3424 
3425 #ifndef SHM_HUGETLB
3426   #define SHM_HUGETLB 04000
3427 #endif
3428 
3429 #define shm_warning_format(format, ...)              \
3430   do {                                               \
3431     if (UseLargePages &&                             \
3432         (!FLAG_IS_DEFAULT(UseLargePages) ||          \
3433          !FLAG_IS_DEFAULT(UseSHM) ||                 \
3434          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
3435       warning(format, __VA_ARGS__);                  \
3436     }                                                \
3437   } while (0)
3438 
3439 #define shm_warning(str) shm_warning_format("%s", str)
3440 
3441 #define shm_warning_with_errno(str)                \
3442   do {                                             \


3773 
3774 size_t os::large_page_size() {
     // Accessor for the file-static cache _large_page_size (initialized to 0
     // above; assigned elsewhere during large-page setup).
3775   return _large_page_size;
3776 }
3777 
3778 // With SysV SHM the entire memory region must be allocated as shared
3779 // memory.
3780 // HugeTLBFS allows application to commit large page memory on demand.
3781 // However, when committing memory with HugeTLBFS fails, the region
3782 // that was supposed to be committed will lose the old reservation
3783 // and allow other threads to steal that memory region. Because of this
3784 // behavior we can't commit HugeTLBFS memory.
3785 bool os::can_commit_large_page_memory() {
     // Per the block comment above: only transparent huge pages support
     // committing large-page memory on demand; a failed HugeTLBFS commit
     // would lose the reservation.
3786   return UseTransparentHugePages;
3787 }
3788 
3789 bool os::can_execute_large_page_memory() {
     // True when either transparent huge pages or HugeTLBFS is enabled.
3790   return UseTransparentHugePages || UseHugeTLBFS;
3791 }
3792 











3793 // Reserve memory at an arbitrary address, only if that area is
3794 // available (and not reserved for something else).
3795 
3796 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3797   const int max_tries = 10;
3798   char* base[max_tries];
3799   size_t size[max_tries];
3800   const size_t gap = 0x000000;
3801 
3802   // Assert only that the size is a multiple of the page size, since
3803   // that's all that mmap requires, and since that's all we really know
3804   // about at this low abstraction level.  If we need higher alignment,
3805   // we can either pass an alignment to this method or verify alignment
3806   // in one of the methods further up the call chain.  See bug 5044738.
3807   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3808 
3809   // Repeatedly allocate blocks until the block is allocated at the
3810   // right spot.
3811 
3812   // Linux mmap allows caller to pass an address as hint; give it a try first,


4900   // call to exit(3C). There can be only 32 of these functions registered
4901   // and atexit() does not set errno.
4902 
4903   if (PerfAllowAtExitRegistration) {
4904     // only register atexit functions if PerfAllowAtExitRegistration is set.
4905     // atexit functions can be delayed until process exit time, which
4906     // can be problematic for embedded VM situations. Embedded VMs should
4907     // call DestroyJavaVM() to assure that VM resources are released.
4908 
4909     // note: perfMemory_exit_helper atexit function may be removed in
4910     // the future if the appropriate cleanup code can be added to the
4911     // VM_Exit VMOperation's doit method.
4912     if (atexit(perfMemory_exit_helper) != 0) {
4913       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4914     }
4915   }
4916 
4917   // initialize thread priority policy
4918   prio_init();
4919 



4920   return JNI_OK;
4921 }
4922 
4923 // Mark the polling page as unreadable
4924 void os::make_polling_page_unreadable(void) {
     // Revoke access to the polling page via guard_memory(); a failure here
     // is treated as unrecoverable (fatal).
4925   if (!guard_memory((char*)_polling_page, Linux::page_size())) {
4926     fatal("Could not disable polling page");
4927   }
4928 }
4929 
4930 // Mark the polling page as readable
4931 void os::make_polling_page_readable(void) {
     // Restore PROT_READ on the polling page; abort the VM if mprotect fails.
4932   if (!linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
4933     fatal("Could not enable polling page");
4934   }
4935 }
4936 
4937 // older glibc versions don't have this macro (which expands to
4938 // an optimized bit-counting function) so we have to roll our own
4939 #ifndef CPU_COUNT




 111   #include <sched.h>
 112   #undef _GNU_SOURCE
 113 #else
 114   #include <sched.h>
 115 #endif
 116 
 117 // if RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
 118 // getrusage() is prepared to handle the associated failure.
 119 #ifndef RUSAGE_THREAD
 120   #define RUSAGE_THREAD   (1)               /* only the calling thread */
 121 #endif
 122 
 123 #define MAX_PATH    (2 * K)
 124 
 125 #define MAX_SECS 100000000
 126 
 127 // for timer info max values which include all bits
 128 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 129 
 130 #define LARGEPAGES_BIT (1 << 6)
 131 #define DAX_SHARED_BIT (1 << 8)
 132 ////////////////////////////////////////////////////////////////////////////////
 133 // global variables
 134 julong os::Linux::_physical_memory = 0;
 135 
 136 address   os::Linux::_initial_thread_stack_bottom = NULL;
 137 uintptr_t os::Linux::_initial_thread_stack_size   = 0;
 138 
 139 int (*os::Linux::_clock_gettime)(clockid_t, struct timespec *) = NULL;
 140 int (*os::Linux::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
 141 int (*os::Linux::_pthread_setname_np)(pthread_t, const char*) = NULL;
 142 Mutex* os::Linux::_createThread_lock = NULL;
 143 pthread_t os::Linux::_main_thread;
 144 int os::Linux::_page_size = -1;
 145 bool os::Linux::_supports_fast_thread_cpu_time = false;
 146 uint32_t os::Linux::_os_version = 0;
 147 const char * os::Linux::_glibc_version = NULL;
 148 const char * os::Linux::_libpthread_version = NULL;
 149 
 150 static jlong initial_time_count=0;
 151 


3247 
3248   if (warn && !result) {
3249     warning("HugeTLBFS is not supported by the operating system.");
3250   }
3251 
3252   return result;
3253 }
3254 
3255 // Set the coredump_filter bits to include largepages in core dump (bit 6)
3256 //
3257 // From the coredump_filter documentation:
3258 //
3259 // - (bit 0) anonymous private memory
3260 // - (bit 1) anonymous shared memory
3261 // - (bit 2) file-backed private memory
3262 // - (bit 3) file-backed shared memory
3263 // - (bit 4) ELF header pages in file-backed private memory areas (it is
3264 //           effective only if the bit 2 is cleared)
3265 // - (bit 5) hugetlb private memory
3266 // - (bit 6) hugetlb shared memory
3267 // - (bit 7) dax private memory
3268 // - (bit 8) dax shared memory
3269 //
3270 static void set_coredump_filter(bool largepages, bool dax_shared) {
3271   FILE *f;
3272   long cdm;
3273   bool filter_changed = false;
3274 
3275   if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
3276     return;
3277   }
3278 
3279   if (fscanf(f, "%lx", &cdm) != 1) {
3280     fclose(f);
3281     return;
3282   }
3283 
3284   rewind(f);
3285 
3286   if (largepages && (cdm & LARGEPAGES_BIT) == 0) {
3287     cdm |= LARGEPAGES_BIT;
3288     filter_changed = true;
3289   }
3290   if (dax_shared && (cdm & DAX_SHARED_BIT) == 0) {
3291     cdm |= DAX_SHARED_BIT;
3292     filter_changed = true;
3293   }
3294   if (filter_changed) {
3295     fprintf(f, "%#lx", cdm);
3296   }
3297 
3298   fclose(f);
3299 }
3300 
3301 // Large page support
3302 
3303 static size_t _large_page_size = 0;
3304 
3305 size_t os::Linux::find_large_page_size() {
3306   size_t large_page_size = 0;
3307 
3308   // large_page_size on Linux is used to round up heap size. x86 uses either
3309   // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
3310   // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
3311   // page as large as 256M.
3312   //
3313   // Here we try to figure out page size by parsing /proc/meminfo and looking
3314   // for a line with the following format:


3413   if (!UseLargePages &&
3414       !UseTransparentHugePages &&
3415       !UseHugeTLBFS &&
3416       !UseSHM) {
3417     // Not using large pages.
3418     return;
3419   }
3420 
3421   if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
3422     // The user explicitly turned off large pages.
3423     // Ignore the rest of the large pages flags.
3424     UseTransparentHugePages = false;
3425     UseHugeTLBFS = false;
3426     UseSHM = false;
3427     return;
3428   }
3429 
3430   size_t large_page_size = Linux::setup_large_page_size();
3431   UseLargePages          = Linux::setup_large_page_type(large_page_size);
3432 
3433   set_coredump_filter(true /*largepages*/, false /*dax_shared*/);
3434 }
3435 
3436 #ifndef SHM_HUGETLB
3437   #define SHM_HUGETLB 04000
3438 #endif
3439 
3440 #define shm_warning_format(format, ...)              \
3441   do {                                               \
3442     if (UseLargePages &&                             \
3443         (!FLAG_IS_DEFAULT(UseLargePages) ||          \
3444          !FLAG_IS_DEFAULT(UseSHM) ||                 \
3445          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {  \
3446       warning(format, __VA_ARGS__);                  \
3447     }                                                \
3448   } while (0)
3449 
3450 #define shm_warning(str) shm_warning_format("%s", str)
3451 
3452 #define shm_warning_with_errno(str)                \
3453   do {                                             \


3784 
3785 size_t os::large_page_size() {
     // Accessor for the file-static cache _large_page_size (initialized to 0
     // above; assigned elsewhere during large-page setup).
3786   return _large_page_size;
3787 }
3788 
3789 // With SysV SHM the entire memory region must be allocated as shared
3790 // memory.
3791 // HugeTLBFS allows application to commit large page memory on demand.
3792 // However, when committing memory with HugeTLBFS fails, the region
3793 // that was supposed to be committed will lose the old reservation
3794 // and allow other threads to steal that memory region. Because of this
3795 // behavior we can't commit HugeTLBFS memory.
3796 bool os::can_commit_large_page_memory() {
     // Per the block comment above: only transparent huge pages support
     // committing large-page memory on demand; a failed HugeTLBFS commit
     // would lose the reservation.
3797   return UseTransparentHugePages;
3798 }
3799 
3800 bool os::can_execute_large_page_memory() {
3801   return UseTransparentHugePages || UseHugeTLBFS;
3802 }
3803 
3804 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3805   assert(file_desc >= 0, "file_desc is not valid");
3806   char* result = pd_attempt_reserve_memory_at(bytes, requested_addr);
3807   if (result != NULL) {
3808     if (replace_existing_mapping_with_file_mapping(result, bytes, file_desc) == NULL) {
3809       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3810     }
3811   }
3812   return result;
3813 }
3814 
3815 // Reserve memory at an arbitrary address, only if that area is
3816 // available (and not reserved for something else).
3817 
3818 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3819   const int max_tries = 10;
3820   char* base[max_tries];
3821   size_t size[max_tries];
3822   const size_t gap = 0x000000;
3823 
3824   // Assert only that the size is a multiple of the page size, since
3825   // that's all that mmap requires, and since that's all we really know
3826   // about at this low abstraction level.  If we need higher alignment,
3827   // we can either pass an alignment to this method or verify alignment
3828   // in one of the methods further up the call chain.  See bug 5044738.
3829   assert(bytes % os::vm_page_size() == 0, "reserving unexpected size block");
3830 
3831   // Repeatedly allocate blocks until the block is allocated at the
3832   // right spot.
3833 
3834   // Linux mmap allows caller to pass an address as hint; give it a try first,


4922   // call to exit(3C). There can be only 32 of these functions registered
4923   // and atexit() does not set errno.
4924 
4925   if (PerfAllowAtExitRegistration) {
4926     // only register atexit functions if PerfAllowAtExitRegistration is set.
4927     // atexit functions can be delayed until process exit time, which
4928     // can be problematic for embedded VM situations. Embedded VMs should
4929     // call DestroyJavaVM() to assure that VM resources are released.
4930 
4931     // note: perfMemory_exit_helper atexit function may be removed in
4932     // the future if the appropriate cleanup code can be added to the
4933     // VM_Exit VMOperation's doit method.
4934     if (atexit(perfMemory_exit_helper) != 0) {
4935       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4936     }
4937   }
4938 
4939   // initialize thread priority policy
4940   prio_init();
4941 
4942   if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
4943     set_coredump_filter(false /*largepages*/, true /*dax_shared*/); 
4944   }
4945   return JNI_OK;
4946 }
4947 
4948 // Mark the polling page as unreadable
4949 void os::make_polling_page_unreadable(void) {
4950   if (!guard_memory((char*)_polling_page, Linux::page_size())) {
4951     fatal("Could not disable polling page");
4952   }
4953 }
4954 
4955 // Mark the polling page as readable
4956 void os::make_polling_page_readable(void) {
4957   if (!linux_mprotect((char *)_polling_page, Linux::page_size(), PROT_READ)) {
4958     fatal("Could not enable polling page");
4959   }
4960 }
4961 
4962 // older glibc versions don't have this macro (which expands to
4963 // an optimized bit-counting function) so we have to roll our own
4964 #ifndef CPU_COUNT


< prev index next >