
src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp


*** 44,54 ****
                                               size_t commit_factor,
                                               MemoryType type) :
    _listener(NULL),
    _storage(rs, used_size, page_size),
    _region_granularity(region_granularity),
!   _commit_map(rs.size() * commit_factor / region_granularity, mtGC),
    _memory_type(type) {
    guarantee(is_power_of_2(page_size), "must be");
    guarantee(is_power_of_2(region_granularity), "must be");
  
    MemTracker::record_virtual_memory_type((address)rs.base(), type);
--- 44,54 ----
                                               size_t commit_factor,
                                               MemoryType type) :
    _listener(NULL),
    _storage(rs, used_size, page_size),
    _region_granularity(region_granularity),
!   _region_commit_map(rs.size() * commit_factor / region_granularity, mtGC),
    _memory_type(type) {
    guarantee(is_power_of_2(page_size), "must be");
    guarantee(is_power_of_2(region_granularity), "must be");
  
    MemTracker::record_virtual_memory_type((address)rs.base(), type);
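
For context on the hunk above: the renamed bitmap is sized with one bit per
heap region covered by the reservation, i.e. rs.size() * commit_factor /
region_granularity bits. A minimal standalone sketch of that sizing (not JDK
code; the variable names and values below are hypothetical examples):

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t reserved_bytes     = 256 * 1024 * 1024; // plays the role of rs.size()
      size_t commit_factor      = 1;                 // heap bytes per byte of backing storage
      size_t region_granularity = 1 * 1024 * 1024;   // 1M G1 regions

      // Same expression used to size _region_commit_map: one bit per region.
      size_t bits = reserved_bytes * commit_factor / region_granularity;
      printf("_region_commit_map tracks %zu regions\n", bits); // prints 256
      return 0;
    }
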
*** 86,188 ****
        }
      }
  
      if (AlwaysPreTouch) {
        _storage.pretouch(start_page, size_in_pages, pretouch_gang);
      }
!     _commit_map.set_range(start_idx, start_idx + num_regions);
      fire_on_commit(start_idx, num_regions, zero_filled);
    }
  
    virtual void uncommit_regions(uint start_idx, size_t num_regions) {
      _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
!     _commit_map.clear_range(start_idx, start_idx + num_regions);
    }
  };
  
  // G1RegionToSpaceMapper implementation where the region granularity is smaller
  // than the commit granularity.
  // Basically, the contents of one OS page span several regions.
  class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
-  private:
-   class CommitRefcountArray : public G1BiasedMappedArray<uint> {
-    protected:
-     virtual uint default_value() const { return 0; }
-   };
- 
    size_t _regions_per_page;
  
!   CommitRefcountArray _refcounts;
! 
!   uintptr_t region_idx_to_page_idx(uint region) const {
!     return region / _regions_per_page;
    }
  
   public:
    G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                         size_t actual_size,
                                         size_t page_size,
                                         size_t alloc_granularity,
                                         size_t commit_factor,
                                         MemoryType type) :
      G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
!     _regions_per_page((page_size * commit_factor) / alloc_granularity), _refcounts() {
  
      guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
-     _refcounts.initialize((HeapWord*)rs.base(), (HeapWord*)(rs.base() + align_up(rs.size(), page_size)), page_size);
    }
  
    virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
      size_t const NoPage = ~(size_t)0;
  
      size_t first_committed = NoPage;
      size_t num_committed = 0;
  
!     bool all_zero_filled = true;
!     G1NUMA* numa = G1NUMA::numa();
! 
!     for (uint region_idx = start_idx; region_idx < start_idx + num_regions; region_idx++) {
!       assert(!_commit_map.at(region_idx), "Trying to commit storage at region %u that is already committed", region_idx);
!       size_t page_idx = region_idx_to_page_idx(region_idx);
!       uint old_refcount = _refcounts.get_by_index(page_idx);
! 
!       bool zero_filled = false;
!       if (old_refcount == 0) {
!         if (first_committed == NoPage) {
!           first_committed = page_idx;
!           num_committed = 1;
!         } else {
            num_committed++;
          }
!         zero_filled = _storage.commit(page_idx, 1);
!         if (_memory_type == mtJavaHeap) {
!           void* address = _storage.page_start(page_idx);
!           size_t size_in_bytes = _storage.page_size();
!           numa->request_memory_on_node(address, size_in_bytes, region_idx);
          }
        }
-       all_zero_filled &= zero_filled;
! 
!       _refcounts.set_by_index(page_idx, old_refcount + 1);
!       _commit_map.set_bit(region_idx);
!     }
  
      if (AlwaysPreTouch && num_committed > 0) {
        _storage.pretouch(first_committed, num_committed, pretouch_gang);
      }
      fire_on_commit(start_idx, num_regions, all_zero_filled);
    }
  
    virtual void uncommit_regions(uint start_idx, size_t num_regions) {
!     for (uint i = start_idx; i < start_idx + num_regions; i++) {
!       assert(_commit_map.at(i), "Trying to uncommit storage at region %u that is not committed", i);
!       size_t idx = region_idx_to_page_idx(i);
!       uint old_refcount = _refcounts.get_by_index(idx);
!       assert(old_refcount > 0, "must be");
!       if (old_refcount == 1) {
!         _storage.uncommit(idx, 1);
        }
-       _refcounts.set_by_index(idx, old_refcount - 1);
-       _commit_map.clear_bit(i);
      }
    }
  };
  
  void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
--- 86,209 ----
        }
      }
  
      if (AlwaysPreTouch) {
        _storage.pretouch(start_page, size_in_pages, pretouch_gang);
      }
!     _region_commit_map.set_range(start_idx, start_idx + num_regions);
      fire_on_commit(start_idx, num_regions, zero_filled);
    }
  
    virtual void uncommit_regions(uint start_idx, size_t num_regions) {
      _storage.uncommit((size_t)start_idx * _pages_per_region, num_regions * _pages_per_region);
!     _region_commit_map.clear_range(start_idx, start_idx + num_regions);
    }
  };
  
  // G1RegionToSpaceMapper implementation where the region granularity is smaller
  // than the commit granularity.
  // Basically, the contents of one OS page span several regions.
  class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
    size_t _regions_per_page;
  
!   size_t region_idx_to_page_idx(uint region_idx) const {
!     return region_idx / _regions_per_page;
!   }
! 
!   bool is_page_committed(size_t page_idx) {
!     size_t region = page_idx * _regions_per_page;
!     size_t region_limit = region + _regions_per_page;
!     // Committed if there is a bit set in the range.
!     return _region_commit_map.get_next_one_offset(region, region_limit) != region_limit;
!   }
! 
!   void numa_request_on_node(size_t page_idx) {
!     if (_memory_type == mtJavaHeap) {
!       uint region = (uint)(page_idx * _regions_per_page);
!       void* address = _storage.page_start(page_idx);
!       size_t size_in_bytes = _storage.page_size();
!       G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region);
!     }
    }
  
   public:
    G1RegionsSmallerThanCommitSizeMapper(ReservedSpace rs,
                                         size_t actual_size,
                                         size_t page_size,
                                         size_t alloc_granularity,
                                         size_t commit_factor,
                                         MemoryType type) :
      G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
!     _regions_per_page((page_size * commit_factor) / alloc_granularity) {
  
      guarantee((page_size * commit_factor) >= alloc_granularity, "allocation granularity smaller than commit granularity");
    }
  
    virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
+     assert(num_regions > 0, "Must commit at least one region");
+     assert(_region_commit_map.get_next_one_offset(start_idx, start_idx + num_regions) == start_idx + num_regions,
+            "Should be no committed regions in the range [%u, " SIZE_FORMAT ")",
+            start_idx, start_idx + num_regions);
+ 
      size_t const NoPage = ~(size_t)0;
  
      size_t first_committed = NoPage;
      size_t num_committed = 0;
  
!     size_t start_page = region_idx_to_page_idx(start_idx);
!     size_t end_page = region_idx_to_page_idx((uint)(start_idx + num_regions - 1));
! 
!     bool all_zero_filled = true;
!     for (size_t page = start_page; page <= end_page; page++) {
!       if (!is_page_committed(page)) {
!         // Page not committed.
!         if (num_committed == 0) {
!           first_committed = page;
!         }
          num_committed++;
+ 
+         if (!_storage.commit(page, 1)) {
+           // Found dirty region during commit.
+           all_zero_filled = false;
          }
! 
!         // Move memory to correct NUMA node for the heap.
!         numa_request_on_node(page);
!       } else {
!         // Page already committed.
!         all_zero_filled = false;
        }
      }
! 
!     // Update the commit map for the given range.
!     _region_commit_map.set_range(start_idx, start_idx + num_regions);
! 
      if (AlwaysPreTouch && num_committed > 0) {
        _storage.pretouch(first_committed, num_committed, pretouch_gang);
      }
+ 
      fire_on_commit(start_idx, num_regions, all_zero_filled);
    }
  
    virtual void uncommit_regions(uint start_idx, size_t num_regions) {
!     assert(num_regions > 0, "Must uncommit at least one region");
!     assert(_region_commit_map.get_next_zero_offset(start_idx, start_idx + num_regions) == start_idx + num_regions,
!            "Should only be committed regions in the range [%u, " SIZE_FORMAT ")",
!            start_idx, start_idx + num_regions);
! 
!     size_t start_page = region_idx_to_page_idx(start_idx);
!     size_t end_page = region_idx_to_page_idx((uint)(start_idx + num_regions - 1));
! 
!     // Clear commit map for the given range.
!     _region_commit_map.clear_range(start_idx, start_idx + num_regions);
! 
!     for (size_t page = start_page; page <= end_page; page++) {
!       // We know all pages were committed before clearing the map. If the
!       // page is still marked as committed after the clear we should not
!       // uncommit it.
!       if (!is_page_committed(page)) {
!         _storage.uncommit(page, 1);
        }
      }
    }
  };
  
  void G1RegionToSpaceMapper::fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
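
The second hunk replaces the per-page CommitRefcountArray with queries against
the per-region commit bitmap: a page counts as committed while any region on it
has its bit set, and uncommit_regions() clears the range first so that a page
shared with a still-committed neighbor region is kept. A standalone sketch of
that bookkeeping (not JDK code; std::vector<bool> and kRegionsPerPage are
hypothetical stand-ins for _region_commit_map and _regions_per_page):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const size_t kRegionsPerPage = 4;               // stand-in for _regions_per_page
    static std::vector<bool> region_commit_map(64, false); // stand-in for _region_commit_map

    // Mirrors is_page_committed(): a page is committed iff any region
    // lying on it still has its bit set.
    static bool is_page_committed(size_t page_idx) {
      size_t region = page_idx * kRegionsPerPage;
      size_t region_limit = region + kRegionsPerPage;
      for (size_t r = region; r < region_limit; r++) {
        if (region_commit_map[r]) {
          return true;
        }
      }
      return false;
    }

    // Mirrors the new uncommit path: clear the range first, then uncommit
    // only pages that no surviving region still covers.
    static void uncommit_regions(size_t start_idx, size_t num_regions) {
      for (size_t r = start_idx; r < start_idx + num_regions; r++) {
        region_commit_map[r] = false;
      }
      size_t start_page = start_idx / kRegionsPerPage;
      size_t end_page = (start_idx + num_regions - 1) / kRegionsPerPage;
      for (size_t page = start_page; page <= end_page; page++) {
        if (!is_page_committed(page)) {
          printf("uncommit page %zu\n", page); // _storage.uncommit(page, 1) in the real code
        }
      }
    }

    int main() {
      // Regions 0..5 are committed, spanning pages 0 and 1.
      for (size_t r = 0; r < 6; r++) region_commit_map[r] = true;
      // Uncommit regions 2..5: only page 1 is uncommitted; page 0 survives
      // because regions 0..1 still map onto it.
      uncommit_regions(2, 4);
      return 0;
    }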