src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp

rev 56448 : imported patch 8220310.mut.0
rev 56449 : imported patch 8220310.mut.1
rev 56450 : imported patch 8220310.mut.2
rev 56451 : imported patch 8220310.mut.3-thomas

*** 22,31 ****
--- 22,32 ----
   *
   */
  
  #include "precompiled.hpp"
  #include "gc/g1/g1BiasedArray.hpp"
+ #include "gc/g1/g1MemoryNodeManager.hpp"
  #include "gc/g1/g1RegionToSpaceMapper.hpp"
  #include "logging/log.hpp"
  #include "memory/allocation.inline.hpp"
  #include "memory/virtualspace.hpp"
  #include "runtime/java.hpp"
*** 40,50 ****
                                               size_t page_size,
                                               size_t region_granularity,
                                               size_t commit_factor,
                                               MemoryType type) :
    _listener(NULL),
!   _storage(rs, used_size, page_size),
    _region_granularity(region_granularity),
    _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
    guarantee(is_power_of_2(page_size), "must be");
    guarantee(is_power_of_2(region_granularity), "must be");
--- 41,51 ----
                                               size_t page_size,
                                               size_t region_granularity,
                                               size_t commit_factor,
                                               MemoryType type) :
    _listener(NULL),
!   _storage(rs, used_size, page_size, type),
    _region_granularity(region_granularity),
    _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
    guarantee(is_power_of_2(page_size), "must be");
    guarantee(is_power_of_2(region_granularity), "must be");
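
The hunk above threads the NMT MemoryType through to the backing G1PageBasedVirtualSpace instead of keeping it only in the mapper, which previously recorded the type itself in its constructor. A minimal sketch (not part of this webrev) of what the extended storage constructor could do with the extra argument, assuming it simply tags the reservation for Native Memory Tracking; initialize_with_page_size() and MemTracker::record_virtual_memory_type() are existing HotSpot functions, the four-argument constructor shape is assumed and its member initialization is elided:

    G1PageBasedVirtualSpace::G1PageBasedVirtualSpace(ReservedSpace rs, size_t used_size,
                                                     size_t page_size, MemoryType type) {
      // Set up page-granular bookkeeping over the reserved range (existing helper).
      initialize_with_page_size(rs, used_size, page_size);
      // Tag the reservation for Native Memory Tracking. Before this patch the
      // mapper made this call; passing 'type' down lets the virtual space do it.
      MemTracker::record_virtual_memory_type((address)rs.base(), type);
    }
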
*** 70,83 ****
      guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
    }
  
    virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
!     size_t const start_page = (size_t)start_idx * _pages_per_region;
!     bool zero_filled = _storage.commit(start_page, num_regions * _pages_per_region);
      if (AlwaysPreTouch) {
!       _storage.pretouch(start_page, num_regions * _pages_per_region, pretouch_gang);
      }
      _commit_map.set_range(start_idx, start_idx + num_regions);
      fire_on_commit(start_idx, num_regions, zero_filled);
    }
--- 71,90 ----
      guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
    }
  
    virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
!     const size_t start_page = (size_t)start_idx * _pages_per_region;
!     const size_t size_in_pages = num_regions * _pages_per_region;
!     bool zero_filled = _storage.commit(start_page, size_in_pages);
!     for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++) {
!       size_t page = region_index * _pages_per_region;
!       uint node_index = G1MemoryNodeManager::mgr()->preferred_node_index_for_index(region_index);
!       _storage.request_memory_on_node(page, _pages_per_region, node_index);
!     }
      if (AlwaysPreTouch) {
!       _storage.pretouch(start_page, size_in_pages, pretouch_gang);
      }
      _commit_map.set_range(start_idx, start_idx + num_regions);
      fire_on_commit(start_idx, num_regions, zero_filled);
    }
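
With this change commit_regions() commits the whole page range once, then walks the regions and asks the storage to place each region's pages on that region's preferred NUMA node. request_memory_on_node() is introduced elsewhere in this patch series and is not shown in this file; a hedged sketch of one way it could sit on top of the existing os::numa_make_local() hint (page_start() and _page_size are existing G1PageBasedVirtualSpace members; the mapping from G1's node index to the OS NUMA id is assumed to be 1:1 here):

    void G1PageBasedVirtualSpace::request_memory_on_node(size_t page, size_t size_in_pages, uint node_index) {
      char* const start = page_start(page);              // existing helper: address of a page index
      const size_t size_in_bytes = size_in_pages * _page_size;
      // Hint to the OS that these committed pages should be backed by memory
      // on the given node; assumes node_index equals the OS NUMA id.
      os::numa_make_local(start, size_in_bytes, (int)node_index);
    }
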
*** 124,133 ****
--- 131,141 ----
      size_t first_committed = NoPage;
      size_t num_committed = 0;
  
      bool all_zero_filled = true;
+     G1MemoryNodeManager* mgr = G1MemoryNodeManager::mgr();
  
      for (uint i = start_idx; i < start_idx + num_regions; i++) {
        assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
  
        size_t idx = region_idx_to_page_idx(i);
        uint old_refcount = _refcounts.get_by_index(idx);
*** 139,148 ****
--- 147,158 ----
            num_committed = 1;
          } else {
            num_committed++;
          }
          zero_filled = _storage.commit(idx, 1);
+         uint node_index = mgr->preferred_node_index_for_index(i);
+         _storage.request_memory_on_node(idx, 1, node_index);
        }
        all_zero_filled &= zero_filled;
  
        _refcounts.set_by_index(idx, old_refcount + 1);
        _commit_map.set_bit(i);
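
Both mappers now rely on preferred_node_index_for_index() to choose a node per region. A minimal sketch of one plausible policy, round-robin striping of region indices over the active nodes, so with two nodes the even-numbered regions land on node 0 and the odd-numbered ones on node 1 (_num_active_nodes is an assumed field name, not taken from this webrev):

    uint G1MemoryNodeManager::preferred_node_index_for_index(uint region_index) const {
      // Stripe consecutive regions across the active nodes so allocation,
      // which consumes regions roughly in order, spreads over the machine.
      return region_index % _num_active_nodes;   // assumed field
    }
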