/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BiasedArray.hpp"
#include "gc/g1/g1MemoryNodeManager.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/formatBuffer.hpp"

G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                             size_t used_size,
                                             size_t page_size,
                                             size_t region_granularity,
                                             size_t commit_factor,
                                             MemoryType type) :
  _listener(NULL),
  _storage(rs, used_size, page_size, type),
  _region_granularity(region_granularity),
  _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
  guarantee(is_power_of_2(page_size), "must be");
  guarantee(is_power_of_2(region_granularity), "must be");

  MemTracker::record_virtual_memory_type((address)rs.base(), type);
}
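// Illustrative note (not from the original sources): the two mappers below cover
// the two possible granularity relationships. E.g. with 1 MB heap regions, 4 KB
// OS pages and commit_factor == 1, a region spans 256 pages and the "larger than
// commit size" mapper applies; with 2 MB large pages and the same 1 MB regions,
// one page backs two regions and the "smaller than commit size" mapper is used.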
// G1RegionToSpaceMapper implementation where the region granularity is larger than
// or the same as the commit granularity.
// Basically, the space corresponding to one region spans several OS pages.
class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
 private:
  size_t _pages_per_region;

 public:
  G1RegionsLargerThanCommitSizeMapper(ReservedSpace rs,
                                      size_t actual_size,
                                      size_t page_size,
                                      size_t alloc_granularity,
                                      size_t commit_factor,
                                      MemoryType type) :
    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
    _pages_per_region(alloc_granularity / (page_size * commit_factor)) {

    guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
  }

  virtual void commit_regions(uint start_idx, size_t num_regions, uint node_index, WorkGang* pretouch_gang) {
    const size_t start_page = (size_t)start_idx * _pages_per_region;
    const size_t size_in_pages = num_regions * _pages_per_region;
    bool zero_filled = _storage.commit(start_page, size_in_pages, node_index);
    if (AlwaysPreTouch) {
      _storage.pretouch(start_page, size_in_pages, pretouch_gang);
    }
    _commit_map.set_range(start_idx, start_idx + num_regions);
    fire_on_commit(start_idx, num_regions, zero_filled);
  }

  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
    _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
    _commit_map.clear_range(start_idx, start_idx + num_regions);
  }
};
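// Worked example for the mapper above (illustrative numbers, assuming 32 MB
// regions, 4 KB pages and commit_factor == 1): _pages_per_region == 8192, so
// commit_regions(3, 2, ...) commits pages [3 * 8192, 5 * 8192) and sets the
// commit map bits for regions [3, 5).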
// G1RegionToSpaceMapper implementation where the region granularity is smaller
// than the commit granularity.
// Basically, the contents of one OS page span several regions.
class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
  // Helper class used to hand out node indices evenly, starting from the given
  // node index. When G1HeapRegionSize is smaller than the page size,
  // G1RegionToSpaceMapper commits one heap region at a time even if multiple
  // pages need to be committed. In that case the node indices should reflect
  // it, i.e. they should be used evenly.
  class G1NodeDistributor : public StackObj {
    uint _requested_node_index;
    uint _next_node_index;
    uint _max_node_index;
  public:
    G1NodeDistributor(uint node_index) :
      _requested_node_index(node_index),
      // next() is called in the constructor body, so _next_node_index
      // effectively starts at the first node index.
      _next_node_index(G1MemoryNodeManager::mgr()->num_active_nodes() - 1),
      _max_node_index(G1MemoryNodeManager::mgr()->num_active_nodes()) {
      next();
    }

    uint next_node_index() const {
      return _next_node_index;
    }

    void next() {
      if (_requested_node_index == G1MemoryNodeManager::AnyNodeIndex) {
        _next_node_index = (_next_node_index + 1) % _max_node_index;
      } else {
        _next_node_index = _requested_node_index;
      }
    }
  };

 private:
  class CommitRefcountArray : public G1BiasedMappedArray<uint> {
   protected:
    virtual uint default_value() const { return 0; }
  };

  size_t _regions_per_page;

  CommitRefcountArray _refcounts;

  uintptr_t region_idx_to_page_idx(uint region) const {
    return region / _regions_per_page;
  }

 public:
  G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                       size_t actual_size,
                                       size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
                                       MemoryType type) :
    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
    _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {

    guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity larger than commit granularity");
    _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
  }
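  // Illustrative example of the refcounting scheme above (assumed numbers): with
  // 2 MB pages, commit_factor == 1 and 1 MB regions, _regions_per_page == 2.
  // Regions 4 and 5 both map to page 2: committing region 4 actually commits the
  // page (refcount 0 -> 1), committing region 5 only bumps the refcount (1 -> 2),
  // and the page is uncommitted only when the refcount drops back to zero.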
  virtual void commit_regions(uint start_idx, size_t num_regions, uint node_index, WorkGang* pretouch_gang) {
    size_t const NoPage = ~(size_t)0;

    size_t first_committed = NoPage;
    size_t num_committed = 0;

    bool all_zero_filled = true;

    G1NodeDistributor itr(node_index);

    for (uint i = start_idx; i < start_idx + num_regions; i++) {
      // If there are many pages to touch, different node ids will be used.
      uint processed_node_index = itr.next_node_index();
      assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);

      bool zero_filled = false;
      if (old_refcount == 0) {
        if (first_committed == NoPage) {
          first_committed = idx;
          num_committed = 1;
        } else {
          num_committed++;
        }
        zero_filled = _storage.commit(idx, 1, processed_node_index);
        itr.next();
      }
      all_zero_filled &= zero_filled;

      _refcounts.set_by_index(idx, old_refcount + 1);
      _commit_map.set_bit(i);
    }
    if (AlwaysPreTouch && num_committed > 0) {
      _storage.pretouch(first_committed, num_committed, pretouch_gang);
    }
    fire_on_commit(start_idx, num_regions, all_zero_filled);
  }

  virtual void uncommit_regions(uint start_idx, size_t num_regions) {
    for (uint i = start_idx; i < start_idx + num_regions; i++) {
      assert(_commit_map.at(i), "Trying to uncommit storage at region %u that is not committed", i);
      size_t idx = region_idx_to_page_idx(i);
      uint old_refcount = _refcounts.get_by_index(idx);
      assert(old_refcount > 0, "must be");
      if (old_refcount == 1) {
        _storage.uncommit(idx, 1);
      }
      _refcounts.set_by_index(idx, old_refcount - 1);
      _commit_map.clear_bit(i);
    }
  }
};

void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  if (_listener != NULL) {
    _listener->on_commit(start_idx, num_regions, zero_filled);
  }
}

static bool map_nvdimm_space(ReservedSpace rs) {
  assert(AllocateOldGenAt != NULL, "");
  int backing_fd = os::create_file_for_heap(AllocateOldGenAt);
  if (backing_fd == -1) {
    log_error(gc, init)("Could not create file for Old generation at location %s", AllocateOldGenAt);
    return false;
  }
  // Commit this memory in nv-dimm.
  char* ret = os::attempt_reserve_memory_at(rs.size(), rs.base(), backing_fd);

  if (ret != rs.base()) {
    if (ret != NULL) {
      os::unmap_memory(rs.base(), rs.size());
    }
    log_error(gc, init)("Error in mapping Old Gen to given AllocateOldGenAt = %s", AllocateOldGenAt);
    os::close(backing_fd);
    return false;
  }

  os::close(backing_fd);
  return true;
}

G1RegionToHeteroSpaceMapper::G1RegionToHeteroSpaceMapper(ReservedSpace rs,
                                                         size_t actual_size,
                                                         size_t page_size,
                                                         size_t alloc_granularity,
                                                         size_t commit_factor,
                                                         MemoryType type) :
  G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
  _rs(rs),
  _dram_mapper(NULL),
  _num_committed_dram(0),
  _num_committed_nvdimm(0),
  _start_index_of_dram(0),
  _page_size(page_size),
  _commit_factor(commit_factor),
  _type(type) {
  assert(actual_size == 2 * MaxHeapSize, "For a 2-way heterogeneous heap, the reserved space is twice MaxHeapSize");
}
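// Layout sketch (derived from initialize() below, not a separate specification):
// the reserved space is 2 * MaxHeapSize; [base, base + MaxHeapSize) is
// file-mapped to NV-DIMM and [base + MaxHeapSize, base + 2 * MaxHeapSize) stays
// in DRAM, so _start_index_of_dram == MaxHeapSize / _region_granularity.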
bool G1RegionToHeteroSpaceMapper::initialize() {
  // Since we need to re-map the reserved space ('Xmx' to nv-dimm and 'Xmx' to dram),
  // we release the reserved memory first, because on some OSes (e.g. Windows) a file
  // mapping cannot be placed over memory reserved with a regular mapping.
  os::release_memory(_rs.base(), _rs.size());
  // First half of size Xmx is for nv-dimm.
  ReservedSpace rs_nvdimm = _rs.first_part(MaxHeapSize);
  assert(rs_nvdimm.base() == _rs.base(), "We should get the same base address");

  // Second half of reserved memory is mapped to dram.
  ReservedSpace rs_dram = _rs.last_part(MaxHeapSize);

  assert(rs_dram.size() == rs_nvdimm.size() && rs_nvdimm.size() == MaxHeapSize, "They should all be the same size");

  // Reserve dram memory.
  char* base = os::attempt_reserve_memory_at(rs_dram.size(), rs_dram.base());
  if (base != rs_dram.base()) {
    if (base != NULL) {
      os::release_memory(base, rs_dram.size());
    }
    log_error(gc, init)("Error in re-mapping memory on dram during G1 heterogeneous memory initialization");
    return false;
  }

  // We reserve and commit this entire space to NV-DIMM.
  if (!map_nvdimm_space(rs_nvdimm)) {
    log_error(gc, init)("Error in re-mapping memory to nv-dimm during G1 heterogeneous memory initialization");
    return false;
  }

  if (_region_granularity >= (_page_size * _commit_factor)) {
    _dram_mapper = new G1RegionsLargerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
  } else {
    _dram_mapper = new G1RegionsSmallerThanCommitSizeMapper(rs_dram, rs_dram.size(), _page_size, _region_granularity, _commit_factor, _type);
  }

  _start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
  return true;
}

void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, uint node_index, WorkGang* pretouch_gang) {
  uint end_idx = (start_idx + (uint)num_regions - 1);

  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
  uint num_nvdimm = (uint)num_regions - num_dram;

  if (num_nvdimm > 0) {
    // We do not need to commit nv-dimm regions, since they are committed in the beginning.
    _num_committed_nvdimm += num_nvdimm;
  }
  if (num_dram > 0) {
    _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, node_index, pretouch_gang);
    _num_committed_dram += num_dram;
  }
}
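// Worked example for the split above (illustrative, assuming
// _start_index_of_dram == 100): committing regions [98, 103) gives
// end_idx == 102, num_dram == MIN2(102 - 100 + 1, 5) == 3 and num_nvdimm == 2;
// the DRAM mapper is then asked to commit its local regions [0, 3).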
void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
  uint end_idx = (start_idx + (uint)num_regions - 1);
  uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
  uint num_nvdimm = (uint)num_regions - num_dram;

  if (num_nvdimm > 0) {
    // We do not uncommit memory for nv-dimm regions.
    _num_committed_nvdimm -= num_nvdimm;
  }

  if (num_dram > 0) {
    _dram_mapper->uncommit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram);
    _num_committed_dram -= num_dram;
  }
}

uint G1RegionToHeteroSpaceMapper::num_committed_dram() const {
  return _num_committed_dram;
}

uint G1RegionToHeteroSpaceMapper::num_committed_nvdimm() const {
  return _num_committed_nvdimm;
}

G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_heap_mapper(ReservedSpace rs,
                                                                 size_t actual_size,
                                                                 size_t page_size,
                                                                 size_t region_granularity,
                                                                 size_t commit_factor,
                                                                 MemoryType type) {
  if (AllocateOldGenAt != NULL) {
    G1RegionToHeteroSpaceMapper* mapper = new G1RegionToHeteroSpaceMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
    if (!mapper->initialize()) {
      delete mapper;
      return NULL;
    }
    return (G1RegionToSpaceMapper*)mapper;
  } else {
    return create_mapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  }
}

G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                            size_t actual_size,
                                                            size_t page_size,
                                                            size_t region_granularity,
                                                            size_t commit_factor,
                                                            MemoryType type) {
  if (region_granularity >= (page_size * commit_factor)) {
    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  } else {
    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
  }
}

void G1RegionToSpaceMapper::commit_and_set_special() {
  _storage.commit_and_set_special();
}
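// Usage sketch (illustrative): G1 calls create_heap_mapper() once for the heap
// reservation. With -XX:AllocateOldGenAt=<path> set, the heterogeneous mapper is
// returned (NULL on initialization failure); otherwise create_mapper() picks one
// of the two granularity-based mappers directly.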