5110 // Java can be invoked as 5111 // 1. Without numactl and heap will be allocated/configured on all nodes as 5112 // per the system policy. 5113 // 2. With numactl --interleave: 5114 // Use numa_get_interleave_mask(v2) API to get nodes bitmask. The same 5115 // API for membind case bitmask is reset. 5116 // Interleave is only a hint and Kernel can fall back to other nodes if 5117 // no memory is available on the target nodes. 5118 // 3. With numactl --membind: 5119 // Use numa_get_membind(v2) API to get nodes bitmask. The same API for 5120 // interleave case returns bitmask of all nodes. 5121 // numa_all_nodes_ptr holds bitmask of all nodes. 5122 // numa_get_interleave_mask(v2) and numa_get_membind(v2) APIs return correct 5123 // bitmask when externally configured to run on all or fewer nodes. 5124 5125 if (!Linux::libnuma_init()) { 5126 UseNUMA = false; 5127 } else { 5128 if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) { 5129 // If there's only one node (they start from 0) or if the process 5130 // is bound explicitly to a single node using membind, disable NUMA. 5131 UseNUMA = false; 5132 } else { 5133 5134 LogTarget(Info,os) log; 5135 LogStream ls(log); 5136 5137 Linux::set_configured_numa_policy(Linux::identify_numa_policy()); 5138 5139 struct bitmask* bmp = Linux::_numa_membind_bitmask; 5140 const char* numa_mode = "membind"; 5141 5142 if (Linux::is_running_in_interleave_mode()) { 5143 bmp = Linux::_numa_interleave_bitmask; 5144 numa_mode = "interleave"; 5145 } 5146 5147 ls.print("UseNUMA is enabled and invoked in '%s' mode." 
5148 " Heap will be configured using NUMA memory nodes:", numa_mode); 5149 5150 for (int node = 0; node <= Linux::numa_max_node(); node++) { 5151 if (Linux::_numa_bitmask_isbitset(bmp, node)) { 5152 ls.print(" %d", node); 5153 } 5154 } 5155 } 5156 } 5157 5158 if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) { 5159 // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way 5160 // we can make the adaptive lgrp chunk resizing work. If the user specified both 5161 // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn 5162 // and disable adaptive resizing. 5163 if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) { 5164 warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, " 5165 "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)"); 5166 UseAdaptiveSizePolicy = false; 5167 UseAdaptiveNUMAChunkSizing = false; 5168 } 5169 } 5170 5171 if (!UseNUMA && ForceNUMA) { 5172 UseNUMA = true; 5173 } 5174 } 5175 5176 // this is called _after_ the global arguments have been parsed 5177 jint os::init_2(void) { 5178 5179 // This could be set after os::Posix::init() but all platforms 5180 // have to set it the same so we have to mirror Solaris. 5181 DEBUG_ONLY(os::set_mutex_init_done();) 5182 5183 os::Posix::init_2(); 5184 5185 Linux::fast_thread_clock_init(); 5186 5187 // initialize suspend/resume support - must do this before signal_sets_init() 5188 if (SR_initialize() != 0) { 5189 perror("SR_initialize failed"); 5190 return JNI_ERR; 5191 } 5192 5193 Linux::signal_sets_init(); | 5110 // Java can be invoked as 5111 // 1. Without numactl and heap will be allocated/configured on all nodes as 5112 // per the system policy. 5113 // 2. With numactl --interleave: 5114 // Use numa_get_interleave_mask(v2) API to get nodes bitmask. The same 5115 // API for membind case bitmask is reset. 
5116 // Interleave is only a hint and Kernel can fall back to other nodes if 5117 // no memory is available on the target nodes. 5118 // 3. With numactl --membind: 5119 // Use numa_get_membind(v2) API to get nodes bitmask. The same API for 5120 // interleave case returns bitmask of all nodes. 5121 // numa_all_nodes_ptr holds bitmask of all nodes. 5122 // numa_get_interleave_mask(v2) and numa_get_membind(v2) APIs return correct 5123 // bitmask when externally configured to run on all or fewer nodes. 5124 5125 if (!Linux::libnuma_init()) { 5126 UseNUMA = false; 5127 } else { 5128 if ((Linux::numa_max_node() < 1) || Linux::is_bound_to_single_node()) { 5129 // If there's only one node (they start from 0) or if the process 5130 // is bound explicitly to a single node using membind, disable NUMA unless 5131 // user explicitly forces NUMA optimizations on single-node/UMA systems 5132 UseNUMA = ForceNUMA; 5133 } else { 5134 5135 LogTarget(Info,os) log; 5136 LogStream ls(log); 5137 5138 Linux::set_configured_numa_policy(Linux::identify_numa_policy()); 5139 5140 struct bitmask* bmp = Linux::_numa_membind_bitmask; 5141 const char* numa_mode = "membind"; 5142 5143 if (Linux::is_running_in_interleave_mode()) { 5144 bmp = Linux::_numa_interleave_bitmask; 5145 numa_mode = "interleave"; 5146 } 5147 5148 ls.print("UseNUMA is enabled and invoked in '%s' mode." 5149 " Heap will be configured using NUMA memory nodes:", numa_mode); 5150 5151 for (int node = 0; node <= Linux::numa_max_node(); node++) { 5152 if (Linux::_numa_bitmask_isbitset(bmp, node)) { 5153 ls.print(" %d", node); 5154 } 5155 } 5156 } 5157 } 5158 5159 if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) { 5160 // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way 5161 // we can make the adaptive lgrp chunk resizing work. 
If the user specified both 5162 // UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn 5163 // and disable adaptive resizing. 5164 if (UseAdaptiveSizePolicy || UseAdaptiveNUMAChunkSizing) { 5165 warning("UseNUMA is not fully compatible with SHM/HugeTLBFS large pages, " 5166 "disabling adaptive resizing (-XX:-UseAdaptiveSizePolicy -XX:-UseAdaptiveNUMAChunkSizing)"); 5167 UseAdaptiveSizePolicy = false; 5168 UseAdaptiveNUMAChunkSizing = false; 5169 } 5170 } 5171 } 5172 5173 // this is called _after_ the global arguments have been parsed 5174 jint os::init_2(void) { 5175 5176 // This could be set after os::Posix::init() but all platforms 5177 // have to set it the same so we have to mirror Solaris. 5178 DEBUG_ONLY(os::set_mutex_init_done();) 5179 5180 os::Posix::init_2(); 5181 5182 Linux::fast_thread_clock_init(); 5183 5184 // initialize suspend/resume support - must do this before signal_sets_init() 5185 if (SR_initialize() != 0) { 5186 perror("SR_initialize failed"); 5187 return JNI_ERR; 5188 } 5189 5190 Linux::signal_sets_init(); |