/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1NUMA.inline.hpp"
#include "runtime/os.hpp"

G1NUMA* G1NUMA::_inst = NULL;

void G1NUMA::init_numa_id_to_index_map(const int* numa_ids, uint num_numa_ids) {
  int max_numa_id = 0;
  for (uint i = 0; i < num_numa_ids; i++) {
    if (numa_ids[i] > max_numa_id) {
      max_numa_id = numa_ids[i];
    }
  }

  _len_numa_id_to_index_map = max_numa_id + 1;
  _numa_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_numa_id_to_index_map, mtGC);

  // Initialize all entries to the invalid node index.
  for (int i = 0; i < _len_numa_id_to_index_map; i++) {
    _numa_id_to_index_map[i] = G1MemoryNodeManager::InvalidNodeIndex;
  }

  // Set the indices for the actually retrieved numa ids.
  for (uint i = 0; i < num_numa_ids; i++) {
    int numa_id = numa_ids[i];
    guarantee(is_valid_numa_id(numa_id),
              "must be representable in map, numa id(%d)", numa_id);
    _numa_id_to_index_map[numa_id] = i;
  }
}

// Request that the given memory be placed on its preferred node.
// There are two things to consider.
// First, the relative sizes of G1HeapRegionSize and the page size.
// Second, the memory is assumed to be evenly split across the nodes. As the
// preferred node id is decided in a round-robin manner, the numa id can be
// derived from the address or even from the HeapRegion index.
//
// Examples of 4 numa ids with a non-preferred numa id.
// 1. G1HeapRegionSize is larger than or equal to page size.
//    * Page size:        |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
//    * G1HeapRegionSize: |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
// 2. G1HeapRegionSize is smaller than page size.
//    Memory will be touched one page at a time because G1RegionToSpaceMapper commits
//    pages one by one.
//    * Page size:        |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
//    * G1HeapRegionSize: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
void G1NUMA::request_memory_on_node(address aligned_address, size_t size_in_bytes) {
  assert(is_aligned(aligned_address, _page_size),
         "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
  assert(is_aligned(size_in_bytes, _page_size),
         "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);

  if (size_in_bytes == 0) {
    return;
  }

  // If we don't have a preferred numa id, touch the given area in a round-robin manner.
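  // The touch granularity is the larger of the heap region size and the page
  // size, so each touched chunk maps to exactly one preferred node (see the
  // diagrams above and preferred_index_for_address() below).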
  size_t chunk_size;
  if (HeapRegion::GrainBytes >= _page_size) {
    chunk_size = HeapRegion::GrainBytes;
  } else {
    chunk_size = _page_size;
  }

  assert(is_aligned(size_in_bytes, chunk_size),
         "Size to touch " SIZE_FORMAT " should be aligned to " SIZE_FORMAT,
         size_in_bytes, chunk_size);

  address start_addr = aligned_address;
  address end_addr = aligned_address + size_in_bytes;

  log_debug(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT "), chunk_size=" SIZE_FORMAT "KB in a round-robin manner.",
                            p2i(start_addr), p2i(end_addr), chunk_size / K);

  do {
    // The numa id of each HeapRegion has already been decided, so derive the
    // preferred node index from the chunk's start address.
    uint numa_index = preferred_index_for_address((HeapWord*)start_addr);

    log_trace(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be numa id (%d).",
                              p2i(start_addr), p2i(start_addr + chunk_size), _numa_ids[numa_index]);
    os::numa_make_local((char*)start_addr, chunk_size, _numa_ids[numa_index]);

    start_addr += chunk_size;
  } while (start_addr < end_addr);
}

bool G1NUMA::initialize() {
  assert(UseNUMA, "Invariant");

  size_t num_numa_ids = os::numa_get_groups_num();

  _numa_ids = NEW_C_HEAP_ARRAY(int, num_numa_ids, mtGC);
  _num_active_numa_ids = (uint)os::numa_get_leaf_groups(_numa_ids, num_numa_ids);

  init_numa_id_to_index_map(_numa_ids, _num_active_numa_ids);

  return true;
}

uint G1NUMA::index_of_current_thread() const {
  int numa_id = os::numa_get_group_id();
  return index_of_numa_id(numa_id);
}

void G1NUMA::set_page_size(size_t page_size) {
  _page_size = page_size;
}

G1NUMA::~G1NUMA() {
  FREE_C_HEAP_ARRAY(int, _numa_id_to_index_map);
  FREE_C_HEAP_ARRAY(int, _numa_ids);
  _inst = NULL;
}

uint G1NUMA::preferred_index_for_address(HeapWord* address) const {
  uint region_index = G1CollectedHeap::heap()->addr_to_region(address);
  if (HeapRegion::GrainBytes >= _page_size) {
    // Simple case, pages are smaller than the region so we
    // can just alternate over the nodes.
    return region_index % _num_active_numa_ids;
  } else {
    // Multiple regions in one page, so we need to make sure the
    // regions within a page are preferred on the same node.
    size_t regions_per_page = _page_size / HeapRegion::GrainBytes;
    return (region_index / regions_per_page) % _num_active_numa_ids;
  }
}

uint G1NUMA::index_of_address(HeapWord* address) const {
  int numa_id = os::numa_get_address_id((uintptr_t)address);
  return index_of_numa_id(numa_id);
}