--- old/src/hotspot/share/gc/z/zPageAllocator.cpp 2020-06-01 07:00:28.150699394 +0200 +++ new/src/hotspot/share/gc/z/zPageAllocator.cpp 2020-06-01 07:00:27.879690660 +0200 @@ -120,6 +120,7 @@ _max_reserve(max_reserve), _current_max_capacity(max_capacity), _capacity(0), + _claimed(0), _used(0), _used_high(0), _used_low(0), @@ -255,8 +256,9 @@ size_t ZPageAllocator::unused() const { const ssize_t capacity = (ssize_t)Atomic::load(&_capacity); const ssize_t used = (ssize_t)Atomic::load(&_used); + const ssize_t claimed = (ssize_t)Atomic::load(&_claimed); const ssize_t max_reserve = (ssize_t)_max_reserve; - const ssize_t unused = capacity - used - max_reserve; + const ssize_t unused = capacity - used - claimed - max_reserve; return unused > 0 ? (size_t)unused : 0; } @@ -300,42 +302,36 @@ } } -void ZPageAllocator::increase_used(size_t size, bool allocation, bool relocation) { - if (allocation) { - if (relocation) { - // Allocating a page for the purpose of relocation has a - // negative contribution to the number of reclaimed bytes. - _reclaimed -= size; - } - _allocated += size; +void ZPageAllocator::increase_used(size_t size, bool relocation) { + if (relocation) { + // Allocating a page for the purpose of relocation has a + // negative contribution to the number of reclaimed bytes. + _reclaimed -= size; } + _allocated += size; // Update atomically since we have concurrent readers - Atomic::add(&_used, size); - - if (_used > _used_high) { - _used_high = _used; + const size_t used = Atomic::add(&_used, size); + if (used > _used_high) { + _used_high = used; } } -void ZPageAllocator::decrease_used(size_t size, bool free, bool reclaimed) { - if (free) { - // Only pages explicitly released with the reclaimed flag set - // counts as reclaimed bytes. This flag is true when we release - // a page after relocation, and is false when we release a page - // to undo an allocation. 
- if (reclaimed) { - _reclaimed += size; - } else { - _allocated -= size; - } +void ZPageAllocator::decrease_used(size_t size, bool reclaimed) { + // Only pages explicitly released with the reclaimed flag set + // count as reclaimed bytes. This flag is true when we release + // a page after relocation, and is false when we release a page + // to undo an allocation. + if (reclaimed) { + _reclaimed += size; + } else { + _allocated -= size; } // Update atomically since we have concurrent readers - Atomic::sub(&_used, size); - - if (_used < _used_low) { - _used_low = _used; + const size_t used = Atomic::sub(&_used, size); + if (used < _used_low) { + _used_low = used; } } @@ -376,7 +372,7 @@ } bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const { - size_t available = _current_max_capacity - _used; + size_t available = _current_max_capacity - _used - _claimed; if (no_reserve) { // The reserve should not be considered available @@ -387,7 +383,7 @@ } bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const { - size_t available = _capacity - _used; + size_t available = _capacity - _used - _claimed; if (no_reserve) { // The reserve should not be considered available @@ -446,11 +442,11 @@ } // Updated used statistics - increase_used(size, true /* allocation */, flags.relocation()); + increase_used(size, flags.relocation()); // Send event - event.commit(type, size, flags.non_blocking(), flags.no_reserve(), - _used, _current_max_capacity - _used, _capacity - _used); + event.commit(type, size, flags.non_blocking(), flags.no_reserve(), _used, + _current_max_capacity - _used - _claimed, _capacity - _used - _claimed); // Success return true; @@ -632,7 +628,7 @@ // Adjust capacity and used to reflect the failed capacity increase const size_t remaining = allocation->size() - freed; - decrease_used(remaining, false /* reclaimed */); decrease_capacity(remaining, true 
/* set_max_capacity */); } @@ -701,7 +697,7 @@ ZLocker locker(&_lock); // Update used statistics - decrease_used(page->size(), true /* free */, reclaimed); + decrease_used(page->size(), reclaimed); // Set time when last used page->set_last_used(); @@ -743,8 +739,8 @@ return 0; } - // Adjust used to reflect that these pages are no longer available - increase_used(flushed, false /* allocation */, false /* relocation */); + // Record flushed pages as claimed + Atomic::add(&_claimed, flushed); } // Unmap, uncommit, and destroy flushed pages @@ -759,8 +755,8 @@ SuspendibleThreadSetJoiner joiner(!ZVerifyViews); ZLocker locker(&_lock); - // Adjust used and capacity to reflect the uncommit - decrease_used(flushed, false /* free */, false /* reclaimed */); + // Adjust claimed and capacity to reflect the uncommit + Atomic::sub(&_claimed, flushed); decrease_capacity(flushed, false /* set_max_capacity */); } --- old/src/hotspot/share/gc/z/zPageAllocator.hpp 2020-06-01 07:00:28.607714123 +0200 +++ new/src/hotspot/share/gc/z/zPageAllocator.hpp 2020-06-01 07:00:28.337705421 +0200 @@ -48,6 +48,7 @@ const size_t _max_reserve; volatile size_t _current_max_capacity; volatile size_t _capacity; + volatile size_t _claimed; volatile size_t _used; size_t _used_high; size_t _used_low; @@ -64,8 +65,8 @@ size_t increase_capacity(size_t size); void decrease_capacity(size_t size, bool set_max_capacity); - void increase_used(size_t size, bool allocation, bool relocation); - void decrease_used(size_t size, bool free, bool reclaimed); + void increase_used(size_t size, bool relocation); + void decrease_used(size_t size, bool reclaimed); bool commit_page(ZPage* page); void uncommit_page(ZPage* page);