14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1NUMA.hpp"
27 #include "gc/g1/heapRegion.hpp"
28 #include "logging/log.hpp"
29 #include "runtime/globals.hpp"
30 #include "runtime/os.hpp"
31
// Singleton instance; set exactly once by G1NUMA::create().
G1NUMA* G1NUMA::_inst = NULL;
33
34 void* G1NUMA::base_address() const {
35 assert(_base_address != NULL, "Base address is not yet set");
36 return _base_address;
37 }
38
39 size_t G1NUMA::region_size() const {
40 assert(_region_size > 0, "Heap region size is not yet set");
41 return _region_size;
42 }
43
44 size_t G1NUMA::page_size() const {
45 assert(_page_size > 0, "Page size not is yet set");
46 return _page_size;
47 }
48
49 bool G1NUMA::is_enabled() const { return num_active_nodes() > 1; }
50
// Creates the singleton G1NUMA instance; must be called exactly once.
G1NUMA* G1NUMA::create() {
  guarantee(_inst == NULL, "Should be called once.");
  _inst = new G1NUMA();

  // NUMA only supported on Linux.
#ifdef LINUX
  _inst->initialize(UseNUMA);
#else
  // NOTE(review): the non-Linux initialization path and the matching #endif
  // are not visible in this chunk -- confirm against the full file.
  return _inst;
}
64
// Returns the array of active memory node ids.
// The array length is num_active_nodes(); ownership stays with G1NUMA.
const int* G1NUMA::node_ids() const {
  return _node_ids;
}
69
70 uint G1NUMA::index_of_node_id(int node_id) const {
71 assert(node_id >= 0, "invalid node id %d", node_id);
72 assert(node_id < _len_node_id_to_index_map, "invalid node id %d", node_id);
73 uint node_index = _node_id_to_index_map[node_id];
74 assert(node_index != G1NUMA::UnknownNodeIndex,
75 "invalid node id %d", node_id);
76 return node_index;
77 }
78
// Constructs an empty instance; initialize() / initialize_without_numa()
// and set_region_info() must run before any query is valid.
G1NUMA::G1NUMA() :
  _node_id_to_index_map(NULL), _len_node_id_to_index_map(0),
  _node_ids(NULL), _num_active_node_ids(0),
  _base_address(NULL), _region_size(0), _page_size(0) {
}
84
85 void G1NUMA::initialize_without_numa() {
86 // If NUMA is not enabled or supported, initialize as having a singel node.
87 _num_active_node_ids = 1;
88 _node_ids = NEW_C_HEAP_ARRAY(int, _num_active_node_ids, mtGC);
89 _node_ids[0] = 0;
90 // Map index 0 to node 0
91 _len_node_id_to_index_map = 1;
92 _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);
93 _node_id_to_index_map[0] = 0;
94 }
95
// Builds the node-id bookkeeping. With use_numa false this degrades to the
// single-node setup.
void G1NUMA::initialize(bool use_numa) {
  if (!use_numa) {
    initialize_without_numa();
    return;
  }

  assert(UseNUMA, "Invariant");
  // NOTE(review): the code that retrieves the active node ids from the OS and
  // computes max_node_id is not visible in this chunk -- confirm there.
  // Create a mapping between node_id and index.
  _len_node_id_to_index_map = max_node_id + 1;
  _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);

  // Set all indices with unknown node id.
  for (int i = 0; i < _len_node_id_to_index_map; i++) {
    _node_id_to_index_map[i] = G1NUMA::UnknownNodeIndex;
  }

  // Set the indices for the actually retrieved node ids.
  for (uint i = 0; i < _num_active_node_ids; i++) {
    _node_id_to_index_map[_node_ids[i]] = i;
  }
}
128
129 G1NUMA::~G1NUMA() {
130 FREE_C_HEAP_ARRAY(int, _node_id_to_index_map);
131 FREE_C_HEAP_ARRAY(int, _node_ids);
132 }
133
134 void G1NUMA::set_region_info(void* base_address, size_t region_size, size_t page_size) {
135 _base_address = base_address;
136 _region_size = region_size;
137 _page_size = page_size;
138 }
139
140 uint G1NUMA::num_active_nodes() const {
141 assert(_num_active_node_ids > 0, "just checking");
142 return _num_active_node_ids;
143 }
144
145 uint G1NUMA::index_of_current_thread() const {
146 if (!is_enabled()) {
147 return 0;
148 }
149 return index_of_node_id(os::numa_get_group_id());
150 }
151
152 uint G1NUMA::preferred_node_index_for_index(uint region_index) const {
153 if (region_size() >= page_size()) {
154 // Simple case, pages are smaller than the region so we
155 // can just alternate over the nodes.
186 // If we already pretouched, we can check actual node index here.
187 return index_of_address(hr->bottom());
188 }
189
190 return preferred_node_index_for_index(hr->hrm_index());
191 }
192
193 // Request to spread the given memory evenly across the available NUMA
194 // nodes. Which node to request for a given address is given by the
195 // region size and the page size. Below are two examples on 4 NUMA nodes system:
196 // 1. G1HeapRegionSize(_region_size) is larger than or equal to page size.
197 // * Page #: |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
198 // * HeapRegion #: |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
199 // * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
200 // 2. G1HeapRegionSize(_region_size) is smaller than page size.
201 // Memory will be touched one page at a time because G1RegionToSpaceMapper commits
202 // pages one by one.
203 // * Page #: |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
204 // * HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
205 // * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
206 void G1NUMA::request_memory_on_node(size_t start_page, size_t size_in_pages, uint region_index) {
207 if (!is_enabled()) {
208 return;
209 }
210
211 if (size_in_pages == 0) {
212 return;
213 }
214
215 char* aligned_address = (char*)base_address() + start_page * page_size();
216 size_t size_in_bytes = size_in_pages * page_size();
217 uint node_index = preferred_node_index_for_index(region_index);
218
219 assert(is_aligned(aligned_address, page_size()), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
220 assert(is_aligned(size_in_bytes, page_size()), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);
221
222 log_debug(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be numa id (%d).",
223 p2i(aligned_address), p2i(aligned_address + size_in_bytes), _node_ids[node_index]);
224 os::numa_make_local(aligned_address, size_in_bytes, _node_ids[node_index]);
225 }
226
227
228 uint G1NUMA::max_search_depth() const {
229 // Multiple of 3 is just random number to limit iterations.
230 // There would be some cases that 1 page may be consisted of multiple HeapRegions.
231 return 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes();
232 }
|
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1NUMA.hpp"
27 #include "gc/g1/heapRegion.hpp"
28 #include "logging/log.hpp"
29 #include "runtime/globals.hpp"
30 #include "runtime/os.hpp"
31
// Singleton instance; set exactly once by G1NUMA::create().
G1NUMA* G1NUMA::_inst = NULL;
33
34 size_t G1NUMA::region_size() const {
35 assert(_region_size > 0, "Heap region size is not yet set");
36 return _region_size;
37 }
38
39 size_t G1NUMA::page_size() const {
40 assert(_page_size > 0, "Page size not is yet set");
41 return _page_size;
42 }
43
44 bool G1NUMA::is_enabled() const { return num_active_nodes() > 1; }
45
// Creates the singleton G1NUMA instance; must be called exactly once.
G1NUMA* G1NUMA::create() {
  guarantee(_inst == NULL, "Should be called once.");
  _inst = new G1NUMA();

  // NUMA only supported on Linux.
#ifdef LINUX
  _inst->initialize(UseNUMA);
#else
  // NOTE(review): the non-Linux initialization path and the matching #endif
  // are not visible in this chunk -- confirm against the full file.
  return _inst;
}
59
// Returns the array of active memory node ids.
// The array length is num_active_nodes(); ownership stays with G1NUMA.
const int* G1NUMA::node_ids() const {
  return _node_ids;
}
64
65 uint G1NUMA::index_of_node_id(int node_id) const {
66 assert(node_id >= 0, "invalid node id %d", node_id);
67 assert(node_id < _len_node_id_to_index_map, "invalid node id %d", node_id);
68 uint node_index = _node_id_to_index_map[node_id];
69 assert(node_index != G1NUMA::UnknownNodeIndex,
70 "invalid node id %d", node_id);
71 return node_index;
72 }
73
// Constructs an empty instance; initialize() / initialize_without_numa()
// and set_region_info() must run before any query is valid.
G1NUMA::G1NUMA() :
  _node_id_to_index_map(NULL), _len_node_id_to_index_map(0),
  _node_ids(NULL), _num_active_node_ids(0),
  _region_size(0), _page_size(0) {
}
79
80 void G1NUMA::initialize_without_numa() {
81 // If NUMA is not enabled or supported, initialize as having a singel node.
82 _num_active_node_ids = 1;
83 _node_ids = NEW_C_HEAP_ARRAY(int, _num_active_node_ids, mtGC);
84 _node_ids[0] = 0;
85 // Map index 0 to node 0
86 _len_node_id_to_index_map = 1;
87 _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);
88 _node_id_to_index_map[0] = 0;
89 }
90
// Builds the node-id bookkeeping. With use_numa false this degrades to the
// single-node setup.
void G1NUMA::initialize(bool use_numa) {
  if (!use_numa) {
    initialize_without_numa();
    return;
  }

  assert(UseNUMA, "Invariant");
  // NOTE(review): the code that retrieves the active node ids from the OS and
  // computes max_node_id is not visible in this chunk -- confirm there.
  // Create a mapping between node_id and index.
  _len_node_id_to_index_map = max_node_id + 1;
  _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);

  // Set all indices with unknown node id.
  for (int i = 0; i < _len_node_id_to_index_map; i++) {
    _node_id_to_index_map[i] = G1NUMA::UnknownNodeIndex;
  }

  // Set the indices for the actually retrieved node ids.
  for (uint i = 0; i < _num_active_node_ids; i++) {
    _node_id_to_index_map[_node_ids[i]] = i;
  }
}
123
124 G1NUMA::~G1NUMA() {
125 FREE_C_HEAP_ARRAY(int, _node_id_to_index_map);
126 FREE_C_HEAP_ARRAY(int, _node_ids);
127 }
128
129 void G1NUMA::set_region_info(size_t region_size, size_t page_size) {
130 _region_size = region_size;
131 _page_size = page_size;
132 }
133
134 uint G1NUMA::num_active_nodes() const {
135 assert(_num_active_node_ids > 0, "just checking");
136 return _num_active_node_ids;
137 }
138
139 uint G1NUMA::index_of_current_thread() const {
140 if (!is_enabled()) {
141 return 0;
142 }
143 return index_of_node_id(os::numa_get_group_id());
144 }
145
146 uint G1NUMA::preferred_node_index_for_index(uint region_index) const {
147 if (region_size() >= page_size()) {
148 // Simple case, pages are smaller than the region so we
149 // can just alternate over the nodes.
180 // If we already pretouched, we can check actual node index here.
181 return index_of_address(hr->bottom());
182 }
183
184 return preferred_node_index_for_index(hr->hrm_index());
185 }
186
187 // Request to spread the given memory evenly across the available NUMA
188 // nodes. Which node to request for a given address is given by the
189 // region size and the page size. Below are two examples on 4 NUMA nodes system:
190 // 1. G1HeapRegionSize(_region_size) is larger than or equal to page size.
191 // * Page #: |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
192 // * HeapRegion #: |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
193 // * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
194 // 2. G1HeapRegionSize(_region_size) is smaller than page size.
195 // Memory will be touched one page at a time because G1RegionToSpaceMapper commits
196 // pages one by one.
197 // * Page #: |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
198 // * HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
199 // * NUMA node #: |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
200 void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index) {
201 if (!is_enabled()) {
202 return;
203 }
204
205 if (size_in_bytes == 0) {
206 return;
207 }
208
209 uint node_index = preferred_node_index_for_index(region_index);
210
211 assert(is_aligned(aligned_address, page_size()), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
212 assert(is_aligned(size_in_bytes, page_size()), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);
213
214 log_debug(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be numa id (%d).",
215 p2i(aligned_address), p2i((char*)aligned_address + size_in_bytes), _node_ids[node_index]);
216 os::numa_make_local((char*)aligned_address, size_in_bytes, _node_ids[node_index]);
217 }
218
219 uint G1NUMA::max_search_depth() const {
220 // Multiple of 3 is just random number to limit iterations.
221 // There would be some cases that 1 page may be consisted of multiple HeapRegions.
222 return 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes();
223 }
|