--- old
+++ new
@@ os::init_2 @@
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());

 #if defined(IA32)
   workaround_expand_exec_shield_cs_limit();
 #endif

   Linux::libpthread_init();
   Linux::sched_getcpu_init();
   log_info(os)("HotSpot is running with %s, %s",
                Linux::glibc_version(), Linux::libpthread_version());

   if (UseNUMA) {
     if (!Linux::libnuma_init()) {
       UseNUMA = false;
     } else {
       if ((Linux::numa_max_node() < 1)) {
         // There's only one node (they start from 0), disable NUMA.
         UseNUMA = false;
       }
     }
-    // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
-    // we can make the adaptive lgrp chunk resizing work. If the user specified
-    // both UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn and
-    // disable adaptive resizing.
-    if (UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
-      if (FLAG_IS_DEFAULT(UseNUMA)) {
-        UseNUMA = false;
-      } else {
-        if (FLAG_IS_DEFAULT(UseLargePages) &&
-            FLAG_IS_DEFAULT(UseSHM) &&
-            FLAG_IS_DEFAULT(UseHugeTLBFS)) {
-          UseLargePages = false;
-        } else if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
-          warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
-          UseAdaptiveSizePolicy = false;
-          UseAdaptiveNUMAChunkSizing = false;
-        }
-      }
-    }
+
+    if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
+      // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
+      // we can make the adaptive lgrp chunk resizing work. If the user specified both
+      // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
+      // and disable adaptive resizing.
+      if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) {
+        warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, "
+                "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)");
+        UseAdaptiveSizePolicy = false;
+        UseAdaptiveNUMAChunkSizing = false;
+      }
+    }
+
     if (!UseNUMA && ForceNUMA) {
       UseNUMA = true;
     }
   }

   if (MaxFDLimit) {
     // set the number of file descriptors to max. print out error
     // if getrlimit/setrlimit fails but continue regardless.
     struct rlimit nbr_files;
     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
     if (status != 0) {
       log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
     } else {
       nbr_files.rlim_cur = nbr_files.rlim_max;
       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
       if (status != 0) {
         log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
       }
     }
   }
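For reference, the single-node detection in the hunk above reduces to two libnuma calls. A minimal standalone sketch of that check (illustrative only, not HotSpot code; assumes the libnuma development headers are installed, build with -lnuma):

#include <numa.h>
#include <cstdio>

int main() {
  bool use_numa = true;
  if (numa_available() == -1) {
    // libnuma cannot talk to the kernel's NUMA API on this system.
    use_numa = false;
  } else if (numa_max_node() < 1) {
    // Nodes are numbered from 0, so a max node of 0 means a single node.
    use_numa = false;
  }
  std::printf("NUMA %s\n", use_numa ? "enabled" : "disabled");
  return 0;
}

Note that HotSpot does not link against libnuma directly: Linux::libnuma_init() dlopens the library and resolves these symbols at runtime, which is why it can simply fall back to UseNUMA = false when the library is absent.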
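The MaxFDLimit block at the end is the standard pattern for raising the soft RLIMIT_NOFILE limit to the hard limit. The same getrlimit/setrlimit sequence in standalone form (illustrative only; plain perror stands in for HotSpot's Unified Logging):

#include <sys/resource.h>
#include <cstdio>

int main() {
  struct rlimit nbr_files;
  if (getrlimit(RLIMIT_NOFILE, &nbr_files) != 0) {
    std::perror("getrlimit");                  // report the error, but continue regardless
  } else {
    nbr_files.rlim_cur = nbr_files.rlim_max;   // raise the soft limit up to the hard limit
    if (setrlimit(RLIMIT_NOFILE, &nbr_files) != 0) {
      std::perror("setrlimit");
    }
  }
  return 0;
}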