
src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp

rev 56323 : imported patch 8220310.mut.0
rev 56324 : imported patch 8220310.mut.1_thomas
rev 56326 : [mq]: 8220310.mut.1-3_kim

*** 22,31 ****
--- 22,32 ----
   *
   */
  
  #include "precompiled.hpp"
  #include "gc/g1/g1BiasedArray.hpp"
+ #include "gc/g1/g1MemoryNodeManager.hpp"
  #include "gc/g1/g1RegionToSpaceMapper.hpp"
  #include "logging/log.hpp"
  #include "memory/allocation.inline.hpp"
  #include "memory/virtualspace.hpp"
  #include "runtime/java.hpp"
*** 40,50 ****
                                               size_t page_size,
                                               size_t region_granularity,
                                               size_t commit_factor,
                                               MemoryType type) :
    _listener(NULL),
!   _storage(rs, used_size, page_size),
    _region_granularity(region_granularity),
    _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
    guarantee(is_power_of_2(page_size), "must be");
    guarantee(is_power_of_2(region_granularity), "must be");
--- 41,51 ----
                                               size_t page_size,
                                               size_t region_granularity,
                                               size_t commit_factor,
                                               MemoryType type) :
    _listener(NULL),
!   _storage(rs, used_size, page_size, type),
    _region_granularity(region_granularity),
    _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
    guarantee(is_power_of_2(page_size), "must be");
    guarantee(is_power_of_2(region_granularity), "must be");
*** 69,83 ****
      _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
  
      guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
    }
  
!   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
!     size_t const start_page = (size_t)start_idx * _pages_per_region;
!     bool zero_filled = _storage.commit(start_page, num_regions * _pages_per_region);
      if (AlwaysPreTouch) {
!       _storage.pretouch(start_page, num_regions * _pages_per_region, pretouch_gang);
      }
      _commit_map.set_range(start_idx, start_idx + num_regions);
      fire_on_commit(start_idx, num_regions, zero_filled);
    }
--- 70,85 ----
      _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
  
      guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
    }
  
!   virtual void commit_regions(uint start_idx, size_t num_regions, uint node_index, WorkGang* pretouch_gang) {
!     const size_t start_page = (size_t)start_idx * _pages_per_region;
!     const size_t size_in_pages = num_regions * _pages_per_region;
!     bool zero_filled = _storage.commit(start_page, size_in_pages, node_index);
      if (AlwaysPreTouch) {
!       _storage.pretouch(start_page, size_in_pages, pretouch_gang);
      }
      _commit_map.set_range(start_idx, start_idx + num_regions);
      fire_on_commit(start_idx, num_regions, zero_filled);
    }
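In this mapper each heap region spans one or more whole OS pages, so committing regions [start_idx, start_idx + num_regions) on a given NUMA node reduces to a single page-range commit. A minimal standalone C++ sketch of that arithmetic (not part of the patch; the page size, region size and example values are assumptions):

// Standalone sketch of the page-range arithmetic in the hunk above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t page_size        = 4 * 1024;   // assumed 4K pages
  const size_t region_size      = 32 * 1024;  // assumed 32K regions (region >= page)
  const size_t commit_factor    = 1;
  const size_t pages_per_region = region_size / (page_size * commit_factor);

  uint32_t start_idx   = 5;   // first region to commit (example value)
  size_t   num_regions = 3;   // number of regions to commit (example value)
  uint32_t node_index  = 1;   // preferred NUMA node (hypothetical value)

  size_t start_page    = (size_t)start_idx * pages_per_region;
  size_t size_in_pages = num_regions * pages_per_region;

  // In the real mapper this corresponds to _storage.commit(start_page, size_in_pages, node_index).
  printf("commit pages [%zu, %zu) on node %u\n",
         start_page, start_page + size_in_pages, node_index);
  return 0;
}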
*** 89,98 ****
--- 91,130 ----
  // G1RegionToSpaceMapper implementation where the region granularity is smaller
  // than the commit granularity.
  // Basically, the contents of one OS page span several regions.
  class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
+   // Helper class used to get node indices evenly, starting from the given node index.
+   // When G1HeapRegionSize is smaller than the page size, G1RegionToSpaceMapper will
+   // commit one heap region at a time if multiple pages need to be committed. In such a case,
+   // the node index should reflect that as well, i.e. node indices should be used evenly.
+   class G1NodeDistributor : public StackObj {
+     uint _requested_node_index;
+     uint _next_node_index;
+     uint _max_node_index;
+   public:
+     G1NodeDistributor(uint node_index) :
+       _requested_node_index(node_index),
+       // The constructor body calls next(), so iteration starts from the first node index.
+       _next_node_index(G1MemoryNodeManager::mgr()->num_active_nodes() - 1),
+       _max_node_index(G1MemoryNodeManager::mgr()->num_active_nodes()) {
+       next();
+     }
+ 
+     uint next_node_index() const {
+       return _next_node_index;
+     }
+ 
+     void next() {
+       if (_requested_node_index == G1MemoryNodeManager::AnyNodeIndex) {
+         _next_node_index = (_next_node_index + 1) % _max_node_index;
+       } else {
+         _next_node_index = _requested_node_index;
+       }
+     }
+   };
+ 
  private:
    class CommitRefcountArray : public G1BiasedMappedArray<uint> {
   protected:
     virtual uint default_value() const { return 0; }
   };
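The helper cycles through the active memory nodes whenever the caller asks for AnyNodeIndex, and pins to the requested node otherwise. A standalone C++ sketch of that behaviour (not HotSpot code; the node-manager lookup is replaced by a plain constructor argument):

// Standalone sketch of the G1NodeDistributor round-robin behaviour shown above.
#include <cstdint>
#include <cstdio>

static const uint32_t AnyNodeIndex = UINT32_MAX;  // stand-in for G1MemoryNodeManager::AnyNodeIndex

class NodeDistributor {
  uint32_t _requested_node_index;
  uint32_t _next_node_index;
  uint32_t _max_node_index;
public:
  NodeDistributor(uint32_t node_index, uint32_t num_active_nodes) :
    _requested_node_index(node_index),
    _next_node_index(num_active_nodes - 1),  // so the first next() yields node 0
    _max_node_index(num_active_nodes) {
    next();
  }
  uint32_t next_node_index() const { return _next_node_index; }
  void next() {
    if (_requested_node_index == AnyNodeIndex) {
      _next_node_index = (_next_node_index + 1) % _max_node_index;  // round-robin
    } else {
      _next_node_index = _requested_node_index;                     // pinned to one node
    }
  }
};

int main() {
  NodeDistributor any(AnyNodeIndex, 2);   // assumed 2 active nodes
  for (int i = 0; i < 4; i++) {           // prints: 0 1 0 1
    printf("%u ", any.next_node_index());
    any.next();
  }
  printf("\n");
  return 0;
}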
*** 117,135 ****
      guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
      _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
    }
  
!   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
      size_t const NoPage = ~(size_t)0;
  
      size_t first_committed = NoPage;
      size_t num_committed = 0;
  
      bool all_zero_filled = true;
  
      for (uint i = start_idx; i < start_idx + num_regions; i++) {
        assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
        size_t idx = region_idx_to_page_idx(i);
        uint old_refcount = _refcounts.get_by_index(idx);
  
        bool zero_filled = false;
--- 149,171 ----
      guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
      _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
    }
  
!   virtual void commit_regions(uint start_idx, size_t num_regions, uint node_index, WorkGang* pretouch_gang) {
      size_t const NoPage = ~(size_t)0;
  
      size_t first_committed = NoPage;
      size_t num_committed = 0;
  
      bool all_zero_filled = true;
+     G1NodeDistributor itr(node_index);
  
      for (uint i = start_idx; i < start_idx + num_regions; i++) {
+       // If there are many pages to touch, different node ids will be used.
+       uint processed_node_index = itr.next_node_index();
        assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
        size_t idx = region_idx_to_page_idx(i);
        uint old_refcount = _refcounts.get_by_index(idx);
  
        bool zero_filled = false;
*** 138,148 ****
            first_committed = idx;
            num_committed = 1;
          } else {
            num_committed++;
          }
!         zero_filled = _storage.commit(idx, 1);
        }
        all_zero_filled &= zero_filled;
  
        _refcounts.set_by_index(idx, old_refcount + 1);
        _commit_map.set_bit(i);
--- 174,185 ----
            first_committed = idx;
            num_committed = 1;
          } else {
            num_committed++;
          }
!         zero_filled = _storage.commit(idx, 1, processed_node_index);
!         itr.next();
        }
        all_zero_filled &= zero_filled;
  
        _refcounts.set_by_index(idx, old_refcount + 1);
        _commit_map.set_bit(i);
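In the two hunks above, several small regions can share one OS page, so a page is committed only when its refcount rises from 0 to 1, and the node distributor advances once per region. A standalone C++ sketch of just the refcount part (not part of the patch; sizes and indices are made up):

// Standalone sketch of the per-page refcount logic used by the smaller-than-commit-size mapper.
#include <cstdio>
#include <vector>

int main() {
  const unsigned regions_per_page = 4;      // assumed: one page covers 4 regions
  std::vector<unsigned> refcounts(4, 0);    // one refcount per page

  unsigned start_idx = 2, num_regions = 6;  // commit regions [2, 8)
  for (unsigned i = start_idx; i < start_idx + num_regions; i++) {
    unsigned page = i / regions_per_page;   // plays the role of region_idx_to_page_idx()
    unsigned old_refcount = refcounts[page];
    if (old_refcount == 0) {
      // Only the first region referencing a page triggers the actual commit.
      printf("commit page %u\n", page);
    }
    refcounts[page] = old_refcount + 1;
  }
  return 0;
}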
*** 252,273 ****
    _start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
    return true;
  }
  
! void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
    uint end_idx = (start_idx + (uint)num_regions - 1);
  
    uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
    uint num_nvdimm = (uint)num_regions - num_dram;
  
    if (num_nvdimm > 0) {
      // We do not need to commit nv-dimm regions, since they are committed in the beginning.
      _num_committed_nvdimm += num_nvdimm;
    }
    if (num_dram > 0) {
!     _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, pretouch_gang);
      _num_committed_dram += num_dram;
    }
  }
  
  void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
--- 289,310 ----
    _start_index_of_dram = (uint)(rs_nvdimm.size() / _region_granularity);
    return true;
  }
  
! void G1RegionToHeteroSpaceMapper::commit_regions(uint start_idx, size_t num_regions, uint node_index, WorkGang* pretouch_gang) {
    uint end_idx = (start_idx + (uint)num_regions - 1);
  
    uint num_dram = end_idx >= _start_index_of_dram ? MIN2((end_idx - _start_index_of_dram + 1), (uint)num_regions) : 0;
    uint num_nvdimm = (uint)num_regions - num_dram;
  
    if (num_nvdimm > 0) {
      // We do not need to commit nv-dimm regions, since they are committed in the beginning.
      _num_committed_nvdimm += num_nvdimm;
    }
    if (num_dram > 0) {
!     _dram_mapper->commit_regions(start_idx > _start_index_of_dram ? (start_idx - _start_index_of_dram) : 0, num_dram, node_index, pretouch_gang);
      _num_committed_dram += num_dram;
    }
  }
  
  void G1RegionToHeteroSpaceMapper::uncommit_regions(uint start_idx, size_t num_regions) {
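The heterogeneous mapper only needs to commit DRAM-backed regions, since NV-DIMM regions were committed up front; it splits the request at _start_index_of_dram and rebases the start index for the DRAM mapper. A standalone C++ sketch of that split (not part of the patch; the boundary and request values are hypothetical):

// Standalone sketch of the DRAM/NV-DIMM split computed in the hunk above.
#include <algorithm>
#include <cstdio>

int main() {
  unsigned start_index_of_dram = 10;       // hypothetical first DRAM-backed region index
  unsigned start_idx = 8, num_regions = 5; // hypothetical commit request [8, 13)

  unsigned end_idx  = start_idx + num_regions - 1;
  unsigned num_dram = end_idx >= start_index_of_dram
                      ? std::min(end_idx - start_index_of_dram + 1, num_regions)
                      : 0;
  unsigned num_nvdimm = num_regions - num_dram;

  // The DRAM mapper is indexed from 0, so the start index is rebased.
  unsigned dram_start = start_idx > start_index_of_dram ? start_idx - start_index_of_dram : 0;

  printf("nvdimm regions: %u, dram regions: %u starting at dram-relative index %u\n",
         num_nvdimm, num_dram, dram_start);
  return 0;
}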