/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1NUMA.inline.hpp"
#include "runtime/os.hpp"

G1NUMA* G1NUMA::_inst = NULL;

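// Builds a map from OS numa id to our dense node index. Illustrative example
// (hypothetical ids, not taken from the source): for active numa ids {0, 2, 3},
// the map has length 4 and becomes {0, InvalidNodeIndex, 1, 2}, so numa id 2
// maps to index 1 and the absent id 1 stays at InvalidNodeIndex.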
void G1NUMA::init_numa_id_to_index_map(const int* numa_ids, uint num_numa_ids) {
  int max_numa_id = 0;
  for (uint i = 0; i < num_numa_ids; i++) {
    if (numa_ids[i] > max_numa_id) {
      max_numa_id = numa_ids[i];
    }
  }

  _len_numa_id_to_index_map = max_numa_id + 1;
  _numa_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_numa_id_to_index_map, mtGC);
  // Initialize all entries to the invalid node index.
  for (int i = 0; i < _len_numa_id_to_index_map; i++) {
    _numa_id_to_index_map[i] = G1MemoryNodeManager::InvalidNodeIndex;
  }

  // Set the index for each actually retrieved numa id.
  for (uint i = 0; i < num_numa_ids; i++) {
    int numa_id = numa_ids[i];
    guarantee(is_valid_numa_id(numa_id), "must be representable in map, numa id(%d)", numa_id);
    _numa_id_to_index_map[numa_id] = i;
  }
}

// Request the given memory to be located on the preferred node.
// There are two things to consider.
// First, how G1HeapRegionSize compares to the page size.
// Second, the memory is assumed to be evenly split among the nodes. Because the
// preferred node is decided in a round-robin manner, the numa id can be derived
// from an address, or even from a HeapRegion index.
//
// Examples with 4 numa ids and no preferred numa id given:
//   1. G1HeapRegionSize is larger than or equal to the page size.
//      * Page size:             |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
//      * G1HeapRegionSize:      |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
//   2. G1HeapRegionSize is smaller than the page size.
//      Memory will be touched one page at a time because G1RegionToSpaceMapper commits
//      pages one by one.
//      * Page size:             |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
//      * G1HeapRegionSize:      |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
void G1NUMA::request_memory_on_node(address aligned_address, size_t size_in_bytes) {
  assert(is_aligned(aligned_address, _page_size), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
  assert(is_aligned(size_in_bytes, _page_size), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);

  if (size_in_bytes == 0) {
    return;
  }

  // Touch the given area one chunk at a time, assigning chunks to nodes in a
  // round-robin manner. The chunk is the larger of G1HeapRegionSize and the
  // page size, so each chunk can be requested on a single node.
  size_t chunk_size;
  if (HeapRegion::GrainBytes >= _page_size) {
    chunk_size = HeapRegion::GrainBytes;
  } else {
    chunk_size = _page_size;
  }
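  // Illustrative sizes (assumed, not from the source): with 1M heap regions and
  // 4K pages, chunk_size is 1M (whole regions are placed one per node request);
  // with 1M heap regions and 2M large pages, chunk_size is 2M (the two regions
  // sharing a page end up on the same node).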

  assert(is_aligned(size_in_bytes, chunk_size), "Size to touch " SIZE_FORMAT " should be aligned to " SIZE_FORMAT,
         size_in_bytes, chunk_size);

  address start_addr = aligned_address;
  address end_addr = aligned_address + size_in_bytes;

  log_debug(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT
                            "), chunk_size=" SIZE_FORMAT "KB in a round-robin manner.",
                            p2i(start_addr), p2i(end_addr), chunk_size / K);

  do {
    // The preferred numa id of each chunk is decided by its starting address.
    uint numa_index = preferred_index_for_address((HeapWord*)start_addr);

    log_trace(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be on numa id (%d).",
                              p2i(start_addr), p2i(start_addr + chunk_size), _numa_ids[numa_index]);
    os::numa_make_local((char*)start_addr, chunk_size, _numa_ids[numa_index]);

    start_addr += chunk_size;
  } while (start_addr < end_addr);
}

bool G1NUMA::initialize() {
  assert(UseNUMA, "Invariant");

  size_t num_numa_ids = os::numa_get_groups_num();

  _numa_ids = NEW_C_HEAP_ARRAY(int, num_numa_ids, mtGC);
  _num_active_numa_ids = (uint)os::numa_get_leaf_groups(_numa_ids, num_numa_ids);

  init_numa_id_to_index_map(_numa_ids, _num_active_numa_ids);

  return true;
}

uint G1NUMA::index_of_current_thread() const {
  int numa_id = os::numa_get_group_id();
  return index_of_numa_id(numa_id);
}

void G1NUMA::set_page_size(size_t page_size) {
  _page_size = page_size;
}

G1NUMA::~G1NUMA() {
  // _numa_id_to_index_map is allocated as uint, so free it with the same type.
  FREE_C_HEAP_ARRAY(uint, _numa_id_to_index_map);
  FREE_C_HEAP_ARRAY(int, _numa_ids);
  _inst = NULL;
}

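// Returns the preferred node index for the region containing the given
// address. Illustrative example (hypothetical sizes, not from the source):
// with 4 active nodes, 1M regions and 2M pages, regions_per_page is 2, so
// regions {0,1} prefer node index 0, {2,3} index 1, {4,5} index 2,
// {6,7} index 3, and {8,9} wrap back to index 0.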
uint G1NUMA::preferred_index_for_address(HeapWord* address) const {
  uint region_index = G1CollectedHeap::heap()->addr_to_region(address);
  if (HeapRegion::GrainBytes >= _page_size) {
    // Simple case, pages are smaller than (or the same size as) the regions,
    // so we can just alternate over the nodes.
    return region_index % _num_active_numa_ids;
  } else {
    // Multiple regions in one page, so we need to make sure the
    // regions within a page are preferred on the same node.
    size_t regions_per_page = _page_size / HeapRegion::GrainBytes;
    return (region_index / regions_per_page) % _num_active_numa_ids;
  }
}

uint G1NUMA::index_of_address(HeapWord* address) const {
  int numa_id = os::numa_get_address_id((uintptr_t)address);
  return index_of_numa_id(numa_id);
}