--- old/src/hotspot/share/gc/g1/g1NUMA.cpp	2019-10-07 14:25:53.506811333 -0400
+++ new/src/hotspot/share/gc/g1/g1NUMA.cpp	2019-10-07 14:25:53.278799048 -0400
@@ -24,34 +24,12 @@
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1NUMA.inline.hpp"
+#include "gc/g1/g1NUMA.hpp"
+#include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 
 G1NUMA* G1NUMA::_inst = NULL;
 
-void G1NUMA::init_numa_id_to_index_map(const int* numa_ids, uint num_numa_ids) {
-  int max_numa_id = 0;
-  for (uint i = 0; i < num_numa_ids; i++) {
-    if (numa_ids[i] > max_numa_id) {
-      max_numa_id = numa_ids[i];
-    }
-  }
-
-  _len_numa_id_to_index_map = max_numa_id + 1;
-  _numa_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_numa_id_to_index_map, mtGC);
-  // Set all indices with invalid numa id.
-  for (int i = 0; i < _len_numa_id_to_index_map; i++) {
-    _numa_id_to_index_map[i] = G1MemoryNodeManager::InvalidNodeIndex;
-  }
-
-  // Set the indices for the actually retrieved numa ids.
-  for (uint i = 0; i < num_numa_ids; i++) {
-    int numa_id = numa_ids[i];
-    guarantee(is_valid_numa_id(numa_id), "must be representable in map, numa id(%d)", numa_id);
-    _numa_id_to_index_map[numa_id] = i;
-  }
-}
-
 // Request the given memory to locate on preferred node.
 // There are 2 things to consider.
 // First, size comparison for G1HeapRegionSize and page size.
@@ -69,8 +47,8 @@
 //   * Page size:          |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
 //   * G1HeapRegionSize:   |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
 void G1NUMA::request_memory_on_node(address aligned_address, size_t size_in_bytes) {
-  assert(is_aligned(aligned_address, _page_size), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
-  assert(is_aligned(size_in_bytes, _page_size), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);
+  assert(is_aligned(aligned_address, page_size()), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
+  assert(is_aligned(size_in_bytes, page_size()), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);
 
   if (size_in_bytes == 0) {
     return;
@@ -78,10 +56,10 @@
 
   // If we don't have preferred numa id, touch the given area with round-robin manner.
   size_t chunk_size;
-  if (HeapRegion::GrainBytes >= _page_size) {
+  if (HeapRegion::GrainBytes >= page_size()) {
     chunk_size = HeapRegion::GrainBytes;
   } else {
-    chunk_size = _page_size;
+    chunk_size = page_size();
   }
 
   assert(is_aligned(size_in_bytes, chunk_size), "Size to touch " SIZE_FORMAT " should be aligned to " SIZE_FORMAT,
@@ -106,7 +84,13 @@
   } while (start_addr < end_addr);
 }
 
-bool G1NUMA::initialize() {
+G1NUMA::G1NUMA() :
+  _numa_id_to_index_map(NULL),
+  _len_numa_id_to_index_map(0),
+  _page_size(0),
+  _numa_ids(NULL),
+  _num_active_numa_ids(0)
+{
   assert(UseNUMA, "Invariant");
 
   size_t num_numa_ids = os::numa_get_groups_num();
@@ -114,9 +98,38 @@
   _numa_ids = NEW_C_HEAP_ARRAY(int, num_numa_ids, mtGC);
   _num_active_numa_ids = (uint)os::numa_get_leaf_groups(_numa_ids, num_numa_ids);
 
-  init_numa_id_to_index_map(_numa_ids, _num_active_numa_ids);
+  int max_numa_id = 0;
+  for (uint i = 0; i < _num_active_numa_ids; i++) {
+    if (_numa_ids[i] > max_numa_id) {
+      max_numa_id = _numa_ids[i];
+    }
+  }
+
+  _len_numa_id_to_index_map = max_numa_id + 1;
+  _numa_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_numa_id_to_index_map, mtGC);
+  // Set all indices with invalid numa id.
+  for (int i = 0; i < _len_numa_id_to_index_map; i++) {
+    _numa_id_to_index_map[i] = G1MemoryNodeManager::InvalidNodeIndex;
+  }
+
+  // Set the indices for the actually retrieved numa ids.
+  for (uint i = 0; i < _num_active_numa_ids; i++) {
+    _numa_id_to_index_map[_numa_ids[i]] = i;
+  }
+}
+
+void G1NUMA::set_numa(G1NUMA* numa) {
+  guarantee(_inst == NULL, "Should be called once.");
+  _inst = numa;
+}
 
-  return true;
+uint G1NUMA::index_of_numa_id(int numa_id) const {
+  assert(numa_id >= 0, "invalid numa id %d", numa_id);
+  assert(numa_id < _len_numa_id_to_index_map, "invalid numa id %d", numa_id);
+  uint numa_index = _numa_id_to_index_map[numa_id];
+  assert(numa_index != G1MemoryNodeManager::InvalidNodeIndex,
+         "invalid numa id %d", numa_id);
+  return numa_index;
 }
 
 uint G1NUMA::index_of_current_thread() const {
@@ -136,19 +149,23 @@
 
 uint G1NUMA::preferred_index_for_address(HeapWord* address) const {
   uint region_index = G1CollectedHeap::heap()->addr_to_region(address);
-  if (HeapRegion::GrainBytes >= _page_size) {
+  if (HeapRegion::GrainBytes >= page_size()) {
     // Simple case, pages are smaller than the region so we
     // can just alternate over the nodes.
     return region_index % _num_active_numa_ids;
   } else {
     // Multiple regions in one page, so we need to make sure the
     // regions within a page is preferred on the same node.
-    size_t regions_per_page = _page_size / HeapRegion::GrainBytes;
+    size_t regions_per_page = page_size() / HeapRegion::GrainBytes;
     return (region_index / regions_per_page) % _num_active_numa_ids;
   }
 }
 
 uint G1NUMA::index_of_address(HeapWord* address) const {
   int numa_id = os::numa_get_address_id((uintptr_t)address);
-  return index_of_numa_id(numa_id);
+  if (numa_id == os::InvalidId) {
+    return G1MemoryNodeManager::InvalidNodeIndex;
+  } else {
+    return index_of_numa_id(numa_id);
+  }
 }
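
Reviewer note (not part of the webrev): the two pieces of arithmetic this change inlines into the G1NUMA constructor and preferred_index_for_address() can be exercised outside HotSpot. Below is a minimal standalone sketch in plain C++; the names build_id_to_index_map, preferred_index, and kInvalidNodeIndex are illustrative stand-ins, not HotSpot APIs. It reproduces the sparse-numa-id-to-dense-index mapping and the region-to-node round-robin, including the case where several heap regions share one (large) page.

#include <cassert>
#include <cstdio>
#include <vector>

// Stand-in for G1MemoryNodeManager::InvalidNodeIndex.
static const unsigned kInvalidNodeIndex = ~0u;

// Same shape as the new G1NUMA() constructor body: size the table by the
// largest id seen, mark every slot invalid, then record the dense index
// of each active id. Ids may be sparse; a machine can expose, say, only
// nodes {0, 2}.
static std::vector<unsigned> build_id_to_index_map(const std::vector<int>& numa_ids) {
  int max_numa_id = 0;
  for (size_t i = 0; i < numa_ids.size(); i++) {
    if (numa_ids[i] > max_numa_id) {
      max_numa_id = numa_ids[i];
    }
  }
  std::vector<unsigned> map(max_numa_id + 1, kInvalidNodeIndex);
  for (size_t i = 0; i < numa_ids.size(); i++) {
    map[numa_ids[i]] = (unsigned)i;
  }
  return map;
}

// Same shape as preferred_index_for_address(): alternate regions over the
// nodes when a region is at least a page; otherwise keep all regions that
// share one page preferred on the same node.
static unsigned preferred_index(size_t region_index, size_t region_size,
                                size_t page_size, unsigned num_nodes) {
  if (region_size >= page_size) {
    return (unsigned)(region_index % num_nodes);
  }
  size_t regions_per_page = page_size / region_size;
  return (unsigned)((region_index / regions_per_page) % num_nodes);
}

int main() {
  // Sparse ids {0, 2} map to dense indices {0, 1}; id 1 stays invalid.
  std::vector<int> ids;
  ids.push_back(0);
  ids.push_back(2);
  std::vector<unsigned> map = build_id_to_index_map(ids);
  assert(map[0] == 0 && map[1] == kInvalidNodeIndex && map[2] == 1);

  // 32M regions on 4K pages: simple alternation, region 5 -> node 1.
  assert(preferred_index(5, 32u << 20, 4u << 10, 2) == 1);

  // 1M regions on 2M pages: regions 0 and 1 share page 0 -> node 0,
  // region 2 starts page 1 -> node 1.
  assert(preferred_index(0, 1u << 20, 2u << 20, 2) == 0);
  assert(preferred_index(1, 1u << 20, 2u << 20, 2) == 0);
  assert(preferred_index(2, 1u << 20, 2u << 20, 2) == 1);

  printf("ok\n");
  return 0;
}

The sketch also makes the behavioral change in index_of_address() easier to see: in the patch, os::numa_get_address_id() may report os::InvalidId (e.g. for memory not yet bound to a node), and the new code maps that to InvalidNodeIndex instead of letting it trip the range asserts in index_of_numa_id().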