src/hotspot/os/linux/os_linux.cpp
rev 52796 : imported patch 8213827-numa-interleave-not-respected
rev 52797 : [mq]: 8213827-tschatzl-review
*** 2722,2731 ****
--- 2722,2734 ----
commit_memory(addr, bytes, alignment_hint, !ExecMem);
}
}
void os::numa_make_global(char *addr, size_t bytes) {
+ if (!UseNUMAInterleaving) {
+ return;
+ }
Linux::numa_interleave_memory(addr, bytes);
}
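For context, the wrapper above ends up applying libnuma's interleave policy to the range. A minimal standalone sketch of the same operation against the raw libnuma v2 API (an illustration, not code from this patch; link with -lnuma):

  #include <numa.h>

  // Interleave an already-reserved range across the nodes of the task's
  // interleave mask, e.g. as set by `numactl --interleave=0,1`.
  static void interleave_region(void* addr, size_t bytes) {
    if (numa_available() == -1) return;                // libnuma unusable here
    struct bitmask* mask = numa_get_interleave_mask(); // copy of the task's interleave mask
    numa_interleave_memory(addr, bytes, mask);         // MPOL_INTERLEAVE for [addr, addr+bytes)
    numa_bitmask_free(mask);                           // the copy is caller-owned
  }

With the early return added above, os::numa_make_global() becomes a no-op unless -XX:+UseNUMAInterleaving is in effect.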
// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
// bind policy to MPOL_PREFERRED for the current thread.
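A hedged illustration of the flag this comment describes (raw libnuma, not HotSpot code): passing 0 relaxes a strict binding into a preference, so allocations can fall back to other nodes under memory pressure.

  #include <numa.h>

  static void prefer_rather_than_bind() {
    numa_set_bind_policy(0); // 0 -> MPOL_PREFERRED for subsequent numa_alloc* calls
    // numa_set_bind_policy(1) would request strict MPOL_BIND instead.
  }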
*** 2778,2796 ****
--- 2781,2808 ----
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
int highest_node_number = Linux::numa_max_node();
size_t i = 0;
+ // If the process is running in interleave mode, get the node ids from the interleave mask.
+ if (Linux::_numa_interleave_ptr != NULL) {
+ for (int node = 0; node <= highest_node_number; node++) {
+ if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_ptr, node)) {
+ ids[i++] = node;
+ }
+ }
+ } else {
// Map all node ids on which it is possible to allocate memory. Node ids are
// not always consecutive, i.e. numbered from 0 up to the highest node
// number. If the nodes have been bound explicitly using numactl membind,
// then allocate memory from those nodes only.
for (int node = 0; node <= highest_node_number; node++) {
if (Linux::isnode_in_bound_nodes((unsigned int)node)) {
ids[i++] = node;
}
}
+ }
return i;
}
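The enumeration pattern used in both branches above can be shown standalone (a sketch against raw libnuma v2; collect_nodes and cap are illustrative names, not part of the patch):

  #include <numa.h>

  // Node ids are not necessarily consecutive, so walk 0..numa_max_node()
  // and keep the ids set in whichever mask applies (interleave or membind).
  static size_t collect_nodes(struct bitmask* mask, int* ids, size_t cap) {
    size_t n = 0;
    for (int node = 0; node <= numa_max_node(); node++) {
      if (n < cap && numa_bitmask_isbitset(mask, node)) {
        ids[n++] = node;
      }
    }
    return n;
  }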
bool os::get_page_info(char *start, page_info* info) {
return false;
*** 2886,2900 ****
libnuma_dlsym(handle, "numa_bitmask_isbitset")));
set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
libnuma_dlsym(handle, "numa_distance")));
set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
libnuma_v2_dlsym(handle, "numa_get_membind")));
if (numa_available() != -1) {
set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
! set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
! set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
// Create an index -> node mapping, since nodes are not always consecutive
_nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
rebuild_nindex_to_node_map();
// Create a cpu -> node mapping
_cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
--- 2898,2918 ----
libnuma_dlsym(handle, "numa_bitmask_isbitset")));
set_numa_distance(CAST_TO_FN_PTR(numa_distance_func_t,
libnuma_dlsym(handle, "numa_distance")));
set_numa_get_membind(CAST_TO_FN_PTR(numa_get_membind_func_t,
libnuma_v2_dlsym(handle, "numa_get_membind")));
+ set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
+ libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
if (numa_available() != -1) {
set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
! set_numa_all_nodes_ptr((struct bitmask*)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
! set_numa_nodes_ptr((struct bitmask*)libnuma_dlsym(handle, "numa_nodes_ptr"));
!
! set_numa_interleave_ptr(_numa_get_interleave_mask());
! set_numa_membind_ptr(_numa_get_membind());
!
// Create an index -> node mapping, since nodes are not always consecutive
_nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
rebuild_nindex_to_node_map();
// Create a cpu -> node mapping
_cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
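The set_numa_* calls above all follow the same dynamic-lookup pattern, so the JVM can still run on systems without libnuma installed. A sketch of what libnuma_v2_dlsym does for the versioned v2 entry points (assumption for illustration: handle comes from dlopen of libnuma.so.1, and the v2 symbols are published under the "libnuma_1.2" version tag):

  #define _GNU_SOURCE
  #include <dlfcn.h>

  struct bitmask;
  typedef struct bitmask* (*numa_get_membind_func_t)(void);

  static numa_get_membind_func_t resolve_membind_v2(void* handle) {
    // dlvsym selects the versioned symbol explicitly, instead of whatever
    // the unversioned name happens to resolve to.
    return (numa_get_membind_func_t)dlvsym(handle, "numa_get_membind", "libnuma_1.2");
  }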
*** 3017,3029 ****
--- 3035,3050 ----
os::Linux::numa_interleave_memory_v2_func_t os::Linux::_numa_interleave_memory_v2;
os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
os::Linux::numa_bitmask_isbitset_func_t os::Linux::_numa_bitmask_isbitset;
os::Linux::numa_distance_func_t os::Linux::_numa_distance;
os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
+ os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
unsigned long* os::Linux::_numa_all_nodes;
struct bitmask* os::Linux::_numa_all_nodes_ptr;
struct bitmask* os::Linux::_numa_nodes_ptr;
+ struct bitmask* os::Linux::_numa_interleave_ptr;
+ struct bitmask* os::Linux::_numa_membind_ptr;
bool os::pd_uncommit_memory(char* addr, size_t size) {
uintptr_t res = (uintptr_t) ::mmap(addr, size, PROT_NONE,
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE|MAP_ANONYMOUS, -1, 0);
return res != (uintptr_t) MAP_FAILED;
*** 5003,5019 ****
if (UseNUMA) {
if (!Linux::libnuma_init()) {
UseNUMA = false;
} else {
! if ((Linux::numa_max_node() < 1) || Linux::isbound_to_single_node()) {
! // If there's only one node (they start from 0) or if the process
! // is bound explicitly to a single node using membind, disable NUMA.
! UseNUMA = false;
}
}
if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
// With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
// we can make the adaptive lgrp chunk resizing work. If the user specified both
// UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
// and disable adaptive resizing.
--- 5024,5083 ----
if (UseNUMA) {
if (!Linux::libnuma_init()) {
UseNUMA = false;
} else {
!
! // Identify whether running in membind or interleave mode.
! bool is_membind = false;
! bool is_interleaved = false;
!
! log_info(os)("UseNUMA is enabled");
!
! // Check for membind mode.
! for (int node = 0; node <= Linux::numa_max_node(); node++) {
! if (Linux::_numa_bitmask_isbitset(Linux::_numa_membind_ptr, node)) {
! is_membind = true;
! break;
}
}
+ // Check for interleave mode.
+ for (int node = 0; node <= Linux::numa_max_node(); node++) {
+ if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_ptr, node)) {
+ is_interleaved = true;
+ // Interleave mode takes precedence: if an interleave mask is set, ignore membind.
+ is_membind = false;
+ break;
+ }
+ }
+
+ struct bitmask* bmp = NULL;
+
+ if (is_interleaved) {
+ bmp = Linux::_numa_interleave_ptr;
+ Linux::set_numa_membind_ptr(NULL);
+ log_info(os)("Java is configured to run in interleave mode");
+ } else if (is_membind) {
+ bmp = Linux::_numa_membind_ptr;
+ Linux::set_numa_interleave_ptr(NULL);
+ log_info(os)("Java is configured to run in membind mode");
+ }
+
+ char buf[BUFSIZ] = {'\0'};
+ char* bufptr = buf;
+
+ for (int node = 0; node <= Linux::numa_max_node(); node++) {
+ if (bmp != NULL && Linux::_numa_bitmask_isbitset(bmp, node)) {
+ bufptr += sprintf(bufptr, "%d ", node);
+ }
+ }
+ if (bufptr != buf) {
+ bufptr[-1] = '\0'; // strip the trailing space
+ }
+ log_info(os)("Heap will be configured using NUMA memory nodes: %s", buf);
+ }
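The detection logic above can be read as a standalone recipe (a sketch against raw libnuma v2, with none of the HotSpot wrappers; report_numa_mode is an illustrative name): a process started under `numactl --interleave=...` has a non-empty interleave mask, which takes precedence; otherwise the membind mask (all nodes by default, or the `numactl --membind=...` subset) says where allocations may land.

  #include <numa.h>
  #include <stdio.h>

  static void report_numa_mode() {
    struct bitmask* interleave = numa_get_interleave_mask();
    struct bitmask* membind = numa_get_membind();
    bool interleaved = false;
    for (int node = 0; node <= numa_max_node(); node++) {
      if (numa_bitmask_isbitset(interleave, node)) {
        interleaved = true;
        break;
      }
    }
    printf("NUMA mode: %s\n", interleaved ? "interleave" : "membind");
    numa_bitmask_free(interleave);
    numa_bitmask_free(membind);
  }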
+
if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
// With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
// we can make the adaptive lgrp chunk resizing work. If the user specified both
// UseNUMA and UseLargePages (or UseSHM/UseHugeTLBFS) on the command line - warn
// and disable adaptive resizing.