--- old/src/hotspot/share/gc/g1/heapRegionManager.cpp 2020-01-03 17:56:51.216844573 +0800
+++ new/src/hotspot/share/gc/g1/heapRegionManager.cpp 2020-01-03 17:56:51.219844681 +0800
@@ -65,11 +65,16 @@
   _card_counts_mapper(NULL),
   _available_map(mtGC),
   _num_committed(0),
+  _concurrent_resizing_map(mtGC),
+#ifdef ASSERT
+  _num_concurrent_resizing(0),
+#endif
   _allocated_heapregions_length(0),
   _regions(), _heap_mapper(NULL),
   _prev_bitmap_mapper(NULL),
   _next_bitmap_mapper(NULL),
-  _free_list("Free list", new MasterFreeRegionListChecker())
+  _free_list("Free list", new MasterFreeRegionListChecker()),
+  _concurrent_uncommitting_list("Concurrent uncommitting list", new MasterFreeRegionListChecker())
 { }
 
 HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap) {
@@ -101,12 +106,21 @@
   _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);
 
   _available_map.initialize(_regions.length());
+  _concurrent_resizing_map.initialize(_regions.length());
 }
 
 bool HeapRegionManager::is_available(uint region) const {
   return _available_map.at(region);
 }
 
+bool HeapRegionManager::is_in_concurrent_resizing(uint region) const {
+  return _concurrent_resizing_map.at(region);
+}
+
+bool HeapRegionManager::is_unavailable_for_allocation(uint region) const {
+  return !_available_map.at(region) && !_concurrent_resizing_map.at(region);
+}
+
 HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
   HeapRegion* hr = NULL;
   bool from_head = !type.is_young();
@@ -274,8 +288,8 @@
 uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
   uint expand_candidate = UINT_MAX;
   for (uint i = 0; i < max_length(); i++) {
-    if (is_available(i)) {
-      // Already in use continue
+    if (is_available(i) || is_in_concurrent_resizing(i)) {
+      // Cannot be used for expansion
       continue;
     }
     // Always save the candidate so we can expand later on.
@@ -307,7 +321,7 @@
 
   while (length_found < num && cur < max_length()) {
     HeapRegion* hr = _regions.get_by_index(cur);
-    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
+    if ((!empty_only && is_unavailable_for_allocation(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
       // This region is a potential candidate for allocation into.
       length_found++;
     } else {
@@ -322,7 +336,7 @@
     for (uint i = found; i < (found + num); i++) {
       HeapRegion* hr = _regions.get_by_index(i);
       // sanity check
-      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
+      guarantee((!empty_only && is_unavailable_for_allocation(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                 "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                 " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
     }
@@ -374,13 +388,13 @@
     return num_regions;
   }
   *res_idx = cur;
-  while (cur < max_length() && !is_available(cur)) {
+  while (cur < max_length() && is_unavailable_for_allocation(cur)) {
     cur++;
   }
   num_regions = cur - *res_idx;
 #ifdef ASSERT
   for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
-    assert(!is_available(i), "just checking");
+    assert(is_unavailable_for_allocation(i), "just checking");
   }
   assert(cur == max_length() || num_regions == 0 || is_available(cur),
          "The region at the current position %u must be available or at the end of the heap.", cur);
@@ -395,7 +409,7 @@
   uint curr = max_length() - 1;
   while (true) {
     HeapRegion *hr = _regions.get_by_index(curr);
-    if (hr == NULL || !is_available(curr)) {
+    if (hr == NULL || is_unavailable_for_allocation(curr)) {
       uint res = expand_at(curr, 1, NULL);
       if (res == 1) {
         *expanded = true;
@@ -422,7 +436,7 @@
   // Ensure that each G1 region in the range is free, returning false if not.
   // Commit those that are not yet available, and keep count.
   for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
-    if (!is_available(curr_index)) {
+    if (is_unavailable_for_allocation(curr_index)) {
       commits++;
       expand_at(curr_index, 1, pretouch_workers);
     }
@@ -691,3 +705,174 @@
   }
   G1CollectedHeap::heap()->phase_times()->record_serial_rebuild_freelist_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
 }
+
+void HeapRegionManager::set_region_for_concurrent_resizing(uint index) {
+#ifdef ASSERT
+  assert_at_safepoint_on_vm_thread();
+  assert(is_available(index), "Expected available region at index %u", index);
+  assert(at(index)->is_empty(), "Expected empty region at index %u", index);
+  assert(at(index)->is_free(), "Expected free region at index %u", index);
+  _num_concurrent_resizing++;
+#endif
+
+  _num_committed--;
+  _available_map.par_clear_bit(index);
+  _concurrent_resizing_map.par_set_bit(index);
+}
+
+void HeapRegionManager::clear_region_for_concurrent_resizing(uint index) {
+#ifdef ASSERT
+  assert_at_safepoint_on_vm_thread();
+  assert(!is_available(index) && is_in_concurrent_resizing(index), "Expected concurrent resizing region at index %u", index);
+  assert(at(index)->is_empty(), "Expected empty region at index %u", index);
+  assert(at(index)->is_free(), "Expected free region at index %u", index);
+  _num_concurrent_resizing--;
+#endif
+
+  _concurrent_resizing_map.par_clear_bit(index);
+}
+
+
+void HeapRegionManager::concurrent_uncommit_regions_memory(uint start, size_t num_regions) {
+  // Reset the node index to distinguish these regions from committed ones.
+  for (uint i = start; i < start + num_regions; i++) {
+    at(i)->set_node_index(G1NUMA::UnknownNodeIndex);
+  }
+
+  // Print before uncommitting.
+  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
+    for (uint i = start; i < start + num_regions; i++) {
+      HeapRegion* hr = at(i);
+      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
+    }
+  }
+
+  // Mappers that support parallel commit/uncommit have their memory
+  // released here, on the concurrent thread.
+
+  // Uncommit region memory
+  if (_heap_mapper->can_parallelly_commit_and_uncommit()) {
+    _heap_mapper->uncommit_regions(start, num_regions);
+  }
+
+  // Also uncommit auxiliary data
+  if (_prev_bitmap_mapper->can_parallelly_commit_and_uncommit()) {
+    _prev_bitmap_mapper->uncommit_regions(start, num_regions);
+  }
+  if (_next_bitmap_mapper->can_parallelly_commit_and_uncommit()) {
+    _next_bitmap_mapper->uncommit_regions(start, num_regions);
+  }
+
+  if (_bot_mapper->can_parallelly_commit_and_uncommit()) {
+    _bot_mapper->uncommit_regions(start, num_regions);
+  }
+  if (_cardtable_mapper->can_parallelly_commit_and_uncommit()) {
+    _cardtable_mapper->uncommit_regions(start, num_regions);
+  }
+
+  if (_card_counts_mapper->can_parallelly_commit_and_uncommit()) {
+    _card_counts_mapper->uncommit_regions(start, num_regions);
+  }
+}
+
+void HeapRegionManager::synchronize_uncommit_regions_memory(uint start, size_t num_regions) {
+  // Mappers that do not support parallel commit/uncommit must be
+  // uncommitted by the VM thread at a safepoint.
+
+  // Uncommit region memory
+  if (!_heap_mapper->can_parallelly_commit_and_uncommit()) {
+    _heap_mapper->uncommit_regions(start, num_regions);
+  }
+
+  // Also uncommit auxiliary data
+  if (!_prev_bitmap_mapper->can_parallelly_commit_and_uncommit()) {
+    _prev_bitmap_mapper->uncommit_regions(start, num_regions);
+  }
+  if (!_next_bitmap_mapper->can_parallelly_commit_and_uncommit()) {
+    _next_bitmap_mapper->uncommit_regions(start, num_regions);
+  }
+
+  if (!_bot_mapper->can_parallelly_commit_and_uncommit()) {
+    _bot_mapper->uncommit_regions(start, num_regions);
+  }
+  if (!_cardtable_mapper->can_parallelly_commit_and_uncommit()) {
+    _cardtable_mapper->uncommit_regions(start, num_regions);
+  }
+
+  if (!_card_counts_mapper->can_parallelly_commit_and_uncommit()) {
+    _card_counts_mapper->uncommit_regions(start, num_regions);
+  }
+}
+
+// Uncommit region memory on the concurrent thread.
+void HeapRegionManager::concurrent_uncommit_regions() {
+  assert(_concurrent_uncommitting_list.length() != 0, "sanity");
+
+  uint start_region = (uint)-1;
+  uint num_regions = 0;
+  FreeRegionListIterator iter(&_concurrent_uncommitting_list);
+  while (iter.more_available()) {
+    HeapRegion* hr = iter.get_next();
+    uint index = hr->hrm_index();
+    if (num_regions == 0) {
+      assert(start_region == (uint)-1, "sanity");
+      start_region = index;
+      num_regions++;
+    } else if (index == (start_region + num_regions)) {
+      // Contiguous with the current range
+      num_regions++;
+    } else {
+      // Range broken; uncommit the accumulated range and start a new one
+      concurrent_uncommit_regions_memory(start_region, num_regions);
+      start_region = index;
+      num_regions = 1;
+    }
+  }
+  concurrent_uncommit_regions_memory(start_region, num_regions);
+}
+
+void HeapRegionManager::prepare_concurrent_uncommit_regions(uint num_regions_to_remove) {
+  assert(_free_list.length() >= num_regions_to_remove, "sanity");
+  assert(_num_concurrent_resizing == 0, "sanity");
+
+  for (uint i = 0; i < num_regions_to_remove; i++) {
+    HeapRegion* hr = _free_list.remove_region(false /* from_head */);
+    // Make the region unavailable in the region manager
+    set_region_for_concurrent_resizing(hr->hrm_index());
+    _concurrent_uncommitting_list.add_ordered(hr);
+  }
+}
+
+void HeapRegionManager::synchronize_concurrent_resizing_regions() {
+  assert_at_safepoint_on_vm_thread();
+
+  uint start_region = (uint)-1;
+  uint num_regions = 0;
+
+  uint length = _concurrent_uncommitting_list.length();
+  assert(length != 0, "sanity");
+
+  for (uint i = 0; i < length; i++) {
+    HeapRegion* hr = _concurrent_uncommitting_list.remove_region(true /* from_head */);
+    uint index = hr->hrm_index();
+    if (num_regions == 0) {
+      assert(start_region == (uint)-1, "sanity");
+      start_region = index;
+      num_regions++;
+    } else if (index == (start_region + num_regions)) {
+      // Contiguous with the current range
+      num_regions++;
+    } else {
+      // Range broken; uncommit the accumulated range and start a new one
+      synchronize_uncommit_regions_memory(start_region, num_regions);
+      start_region = index;
+      num_regions = 1;
+    }
+    clear_region_for_concurrent_resizing(hr->hrm_index());
+  }
+  synchronize_uncommit_regions_memory(start_region, num_regions);
+
+  assert(_concurrent_uncommitting_list.length() == 0, "sanity");
+  assert(_num_concurrent_resizing == 0, "sanity");
+  assert(_concurrent_resizing_map.is_empty(), "sanity");
+}
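
Both concurrent_uncommit_regions() and synchronize_concurrent_resizing_regions() above walk the ordered _concurrent_uncommitting_list and coalesce consecutive region indices so that each contiguous run is uncommitted with a single call. The standalone C++ sketch below (not HotSpot code; uncommit_range and uncommit_coalesced are hypothetical stand-ins for concurrent_uncommit_regions_memory and the list walk) illustrates that coalescing logic under the same assumption the patch relies on, namely that indices arrive in ascending order; it additionally guards the final flush, which the patch omits because the list is asserted non-empty.

// Standalone illustration of the range-coalescing walk used by the patch.
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-in for concurrent_uncommit_regions_memory(start, num_regions).
static void uncommit_range(uint32_t start, size_t num_regions) {
  printf("uncommit regions [%u, %u)\n", start, (uint32_t)(start + num_regions));
}

// 'indices' plays the role of the ordered free-region list (ascending order).
static void uncommit_coalesced(const std::vector<uint32_t>& indices) {
  uint32_t start_region = UINT32_MAX;
  size_t num_regions = 0;
  for (uint32_t index : indices) {
    if (num_regions == 0) {
      start_region = index;                       // first region of a new range
      num_regions = 1;
    } else if (index == start_region + num_regions) {
      num_regions++;                              // contiguous with the current range
    } else {
      uncommit_range(start_region, num_regions);  // range broken: flush it
      start_region = index;
      num_regions = 1;
    }
  }
  if (num_regions > 0) {
    uncommit_range(start_region, num_regions);    // flush the final range
  }
}

int main() {
  // Regions 5-7 and 9 form two contiguous runs, so two uncommit calls are made.
  uncommit_coalesced({5, 6, 7, 9});
  return 0;
}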