src/hotspot/os/linux/os_linux.cpp
rev 52796 : imported patch 8213827-numa-interleave-not-respected
rev 52797 : [mq]: 8213827-tschatzl-review
*** 2722,2733 ****
      commit_memory(addr, bytes, alignment_hint, !ExecMem);
    }
  }

  void os::numa_make_global(char *addr, size_t bytes) {
!   if (!UseNUMAInterleaving)
!     return ;
    Linux::numa_interleave_memory(addr, bytes);
  }

  // Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
  // bind policy to MPOL_PREFERRED for the current thread.
--- 2722,2734 ----
      commit_memory(addr, bytes, alignment_hint, !ExecMem);
    }
  }

  void os::numa_make_global(char *addr, size_t bytes) {
!   if (!UseNUMAInterleaving) {
!     return;
!   }
    Linux::numa_interleave_memory(addr, bytes);
  }

  // Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
  // bind policy to MPOL_PREFERRED for the current thread.
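For context on the guarded call: numa_make_global asks libnuma to interleave the pages of a region round-robin across the allowed memory nodes, and the new guard skips that request entirely when UseNUMAInterleaving is off. A minimal standalone sketch of the same request against the raw libnuma v2 API (not JDK code; assumes libnuma and its headers are installed, compile with -lnuma):

    #include <numa.h>        // libnuma v2 API
    #include <sys/mman.h>
    #include <stdio.h>

    int main() {
      if (numa_available() == -1) {
        fprintf(stderr, "libnuma reports NUMA is unavailable\n");
        return 1;
      }
      // Reserve an anonymous region, then ask the kernel to spread its
      // pages round-robin over all nodes the process may allocate on --
      // the same effect os::numa_make_global has on heap memory.
      size_t bytes = 64 * 1024 * 1024;
      void* addr = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED) {
        return 1;
      }
      numa_interleave_memory(addr, bytes, numa_all_nodes_ptr);
      munmap(addr, bytes);
      return 0;
    }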
*** 2780,2807 ****
  size_t os::numa_get_leaf_groups(int *ids, size_t size) {
    int highest_node_number = Linux::numa_max_node();
    size_t i = 0;

    // Map all node ids in which it is possible to allocate memory. Also nodes are
    // not always consecutively available, i.e. available from 0 to the highest
    // node number. If the nodes have been bound explicitly using numactl membind,
    // then allocate memory from those nodes only.
    for (int node = 0; node <= highest_node_number; node++) {
      if (Linux::isnode_in_bound_nodes((unsigned int)node)) {
        ids[i++] = node;
      }
    }
-
-   // If externally invoked in interleave mode then get node bitmasks from interleave mode pointer.
-   if (Linux::_numa_interleave_ptr != NULL ) {
-     i = 0;
-     for (int node = 0; node <= highest_node_number; node++) {
-       if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_ptr, node)) {
-         ids[i++] = node;
-       }
-     }
    }
    return i;
  }

  bool os::get_page_info(char *start, page_info* info) {
--- 2781,2807 ----
  size_t os::numa_get_leaf_groups(int *ids, size_t size) {
    int highest_node_number = Linux::numa_max_node();
    size_t i = 0;
+   // If externally invoked in interleave mode then get node bitmasks from interleave mode pointer.
+   if (Linux::_numa_interleave_ptr != NULL) {
+     for (int node = 0; node <= highest_node_number; node++) {
+       if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_ptr, node)) {
+         ids[i++] = node;
+       }
+     }
+   } else {
    // Map all node ids in which it is possible to allocate memory. Also nodes are
    // not always consecutively available, i.e. available from 0 to the highest
    // node number. If the nodes have been bound explicitly using numactl membind,
    // then allocate memory from those nodes only.
    for (int node = 0; node <= highest_node_number; node++) {
      if (Linux::isnode_in_bound_nodes((unsigned int)node)) {
        ids[i++] = node;
      }
    }
    }
    return i;
  }

  bool os::get_page_info(char *start, page_info* info) {
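The new branch walks the interleave bitmask one node at a time through the _numa_bitmask_isbitset wrapper. The same walk with plain libnuma (not the JDK wrappers) looks as follows; note that numa_get_interleave_mask() returns an empty mask unless an interleave policy is active, so run this under e.g. numactl --interleave=0,1 to see bits set (compile with -lnuma):

    #include <numa.h>
    #include <stdio.h>

    int main() {
      if (numa_available() == -1) {
        return 1;
      }
      // The interleave mask reflects the policy the process was started
      // with (e.g. via "numactl --interleave=..."); empty otherwise.
      struct bitmask* mask = numa_get_interleave_mask();
      for (int node = 0; node <= numa_max_node(); node++) {
        if (numa_bitmask_isbitset(mask, node)) {
          printf("interleave node: %d\n", node);
        }
      }
      numa_bitmask_free(mask);
      return 0;
    }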
*** 2902,2919 ****
                                          libnuma_v2_dlsym(handle, "numa_get_membind")));
        set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
                                                    libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));

        if (numa_available() != -1) {
-         struct bitmask *bmp;
          set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
!         set_numa_all_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
!         set_numa_nodes_ptr((struct bitmask **)libnuma_dlsym(handle, "numa_nodes_ptr"));
!         bmp = _numa_get_interleave_mask();
!         set_numa_interleave_ptr(&bmp);
!         bmp = _numa_get_membind();
!         set_numa_membind_ptr(&bmp);
          // Create an index -> node mapping, since nodes are not always consecutive
          _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
          rebuild_nindex_to_node_map();
          // Create a cpu -> node mapping
          _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
--- 2902,2918 ----
                                          libnuma_v2_dlsym(handle, "numa_get_membind")));
        set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
                                                    libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));

        if (numa_available() != -1) {
          set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
!         set_numa_all_nodes_ptr((struct bitmask*)libnuma_dlsym(handle, "numa_all_nodes_ptr"));
!         set_numa_nodes_ptr((struct bitmask*)libnuma_dlsym(handle, "numa_nodes_ptr"));
!
!         set_numa_interleave_ptr(_numa_get_interleave_mask());
!         set_numa_membind_ptr(_numa_get_membind());
!
          // Create an index -> node mapping, since nodes are not always consecutive
          _nindex_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
          rebuild_nindex_to_node_map();
          // Create a cpu -> node mapping
          _cpu_to_node = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<int>(0, true);
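For context on the two kinds of values captured in this hunk: dlsym on a libnuma global variable such as numa_all_nodes_ptr yields the address of that variable, while the v2 query functions numa_get_interleave_mask() and numa_get_membind() return a freshly allocated struct bitmask* that can be stored directly, which is why the stack-local bmp and its address-of indirection could be dropped. A standalone sketch of both cases against raw libnuma (not JDK code; the set_numa_* wrappers live in os_linux.hpp and are not shown in this hunk); compile with -lnuma -ldl:

    #include <numa.h>
    #include <dlfcn.h>
    #include <stdio.h>

    int main() {
      void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
      if (handle == NULL || numa_available() == -1) {
        return 1;
      }
      // Case 1: a global *variable* -- dlsym returns the variable's
      // address, so one dereference is needed to reach the bitmask.
      struct bitmask** all_nodes_var =
          (struct bitmask**)dlsym(handle, "numa_all_nodes_ptr");
      // Case 2: a query *function* -- the returned pointer is the
      // bitmask itself and can be stored as-is.
      struct bitmask* interleave_mask = numa_get_interleave_mask();

      if (all_nodes_var != NULL && *all_nodes_var != NULL) {
        printf("nodes allowed: %u of %d\n",
               numa_bitmask_weight(*all_nodes_var), numa_max_node() + 1);
      }
      printf("interleave mask weight: %u\n",
             numa_bitmask_weight(interleave_mask));
      numa_bitmask_free(interleave_mask);
      dlclose(handle);
      return 0;
    }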
*** 5027,5080 ****
      if (!Linux::libnuma_init()) {
        UseNUMA = false;
      } else {
        // Identify whether running in membind or interleave mode.
!       struct bitmask *bmp;
!       bool _is_membind = false;
!       bool _is_interleaved = false;
!       char _buf[BUFSIZ] = {'\0'};
!       char *_bufptr = _buf;
        log_info(os)("UseNUMA is enabled");
        // Check for membind mode.
!       bmp = Linux::_numa_membind_ptr;
!       for (int node = 0; node <= Linux::numa_max_node() ; node++) {
!         if (Linux::_numa_bitmask_isbitset(bmp, node)) {
!           _is_membind = true;
          }
        }

        // Check for interleave mode.
!       bmp = Linux::_numa_interleave_ptr;
!       for (int node = 0; node <= Linux::numa_max_node() ; node++) {
!         if (Linux::_numa_bitmask_isbitset(bmp, node)) {
!           _is_interleaved = true;
            // Set membind to false as interleave mode allows all nodes to be used.
!           _is_membind = false;
          }
        }

!       if (_is_membind) {
!         bmp = Linux::_numa_membind_ptr;
!         Linux::set_numa_interleave_ptr (NULL);
!         log_info(os) (" Java is configured to run in membind mode");
!       }
!       if (_is_interleaved) {
          bmp = Linux::_numa_interleave_ptr;
!         Linux::set_numa_membind_ptr (NULL);
!         log_info(os) (" Java is configured to run in interleave mode");
        }

!       for (int node = 0; node <= Linux::numa_max_node() ; node++) {
          if (Linux::_numa_bitmask_isbitset(bmp, node)) {
!           _bufptr += sprintf (_bufptr, "%d, ", node);
          }
        }
!       _bufptr[-2 ] = '\0';
!       log_info(os) (" Heap will be configured using NUMA memory nodes: %s", _buf);
      }

      if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
        // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
--- 5026,5080 ----
      if (!Linux::libnuma_init()) {
        UseNUMA = false;
      } else {
        // Identify whether running in membind or interleave mode.
!       bool is_membind = false;
!       bool is_interleaved = false;
        log_info(os)("UseNUMA is enabled");
+
        // Check for membind mode.
!       for (int node = 0; node <= Linux::numa_max_node(); node++) {
!         if (Linux::_numa_bitmask_isbitset(Linux::_numa_membind_ptr, node)) {
!           is_membind = true;
!           break;
          }
        }

        // Check for interleave mode.
!       for (int node = 0; node <= Linux::numa_max_node(); node++) {
!         if (Linux::_numa_bitmask_isbitset(Linux::_numa_interleave_ptr, node)) {
!           is_interleaved = true;
            // Set membind to false as interleave mode allows all nodes to be used.
!           is_membind = false;
!           break;
          }
        }

!       struct bitmask* bmp;
!       if (is_interleaved) {
          bmp = Linux::_numa_interleave_ptr;
!         Linux::set_numa_membind_ptr(NULL);
!         log_info(os)("Java is configured to run in interleave mode");
!       } else if (is_membind) {
!         bmp = Linux::_numa_membind_ptr;
!         Linux::set_numa_interleave_ptr(NULL);
!         log_info(os)("Java is configured to run in membind mode");
        }

!       char buf[BUFSIZ] = {'\0'};
!       char* bufptr = buf;
!
!       for (int node = 0; node <= Linux::numa_max_node(); node++) {
          if (Linux::_numa_bitmask_isbitset(bmp, node)) {
!           bufptr += sprintf(bufptr, "%d ", node);
          }
        }
!       bufptr[-1] = '\0';
!       log_info(os)("Heap will be configured using NUMA memory nodes: %s", buf);
      }

      if (UseParallelGC && UseNUMA && UseLargePages && !can_commit_large_page_memory()) {
        // With SHM and HugeTLBFS large pages we cannot uncommit a page, so there's no way
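The detection logic above boils down to: a non-empty interleave mask wins, otherwise the membind mask applies. A standalone sketch of the same decision against raw libnuma (not JDK code; compile with -lnuma), handy for checking the behaviour under numactl --interleave=all versus numactl --membind=0:

    #include <numa.h>
    #include <stdio.h>

    int main() {
      if (numa_available() == -1) {
        return 1;
      }
      struct bitmask* interleave = numa_get_interleave_mask();
      struct bitmask* membind = numa_get_membind();

      // An empty interleave mask means no --interleave policy is in
      // effect. The membind mask is never empty: by default it contains
      // every node the process is allowed to allocate on.
      bool is_interleaved = numa_bitmask_weight(interleave) > 0;
      struct bitmask* active = is_interleaved ? interleave : membind;

      printf("mode: %s, nodes:", is_interleaved ? "interleave" : "membind");
      for (int node = 0; node <= numa_max_node(); node++) {
        if (numa_bitmask_isbitset(active, node)) {
          printf(" %d", node);
        }
      }
      printf("\n");
      numa_bitmask_free(interleave);
      numa_bitmask_free(membind);
      return 0;
    }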