src/hotspot/share/gc/z/zPageAllocator.cpp

*** 118,127 ****
--- 118,128 ----
      _min_capacity(min_capacity),
      _max_capacity(max_capacity),
      _max_reserve(max_reserve),
      _current_max_capacity(max_capacity),
      _capacity(0),
+     _claimed(0),
      _used(0),
      _used_high(0),
      _used_low(0),
      _allocated(0),
      _reclaimed(0),
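
A note on the new counter (not part of the webrev itself): _claimed tracks committed memory that has been flushed out of the page cache for uncommit, so it is neither used nor available. A minimal standalone sketch of that bookkeeping, with std::atomic standing in for HotSpot's Atomic API and all names invented:

    #include <atomic>
    #include <cstddef>

    // Standalone sketch, not HotSpot code: the three buckets the allocator
    // now accounts for. capacity = used + claimed + free-in-cache.
    struct PageAccountingSketch {
      std::atomic<size_t> capacity{0}; // committed memory
      std::atomic<size_t> used{0};     // memory held by live pages
      std::atomic<size_t> claimed{0};  // flushed from the cache, uncommit pending

      // Memory still available to hand out from the cache (clamped at zero).
      size_t cache_available() const {
        const size_t c = capacity.load();
        const size_t taken = used.load() + claimed.load();
        return c > taken ? c - taken : 0;
      }
    };
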
*** 253,264 ****
  }

  size_t ZPageAllocator::unused() const {
    const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
    const ssize_t used = (ssize_t)Atomic::load(&_used);
    const ssize_t max_reserve = (ssize_t)_max_reserve;
!   const ssize_t unused = capacity - used - max_reserve;
    return unused > 0 ? (size_t)unused : 0;
  }

  size_t ZPageAllocator::allocated() const {
    return _allocated;
--- 254,266 ----
  }

  size_t ZPageAllocator::unused() const {
    const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
    const ssize_t used = (ssize_t)Atomic::load(&_used);
+   const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
    const ssize_t max_reserve = (ssize_t)_max_reserve;
!   const ssize_t unused = capacity - used - claimed - max_reserve;
    return unused > 0 ? (size_t)unused : 0;
  }

  size_t ZPageAllocator::allocated() const {
    return _allocated;
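
Why the signed arithmetic matters here (my reading, not stated in the patch): the loads of _capacity, _used and _claimed are three separate atomic reads rather than one snapshot, so their difference can transiently be negative; computing in ssize_t and clamping avoids returning a huge wrapped size_t. A self-contained illustration, with int64_t standing in for ssize_t:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Sketch of the clamping pattern in unused(); names are stand-ins.
    size_t unused_sketch(const std::atomic<size_t>& capacity,
                         const std::atomic<size_t>& used,
                         const std::atomic<size_t>& claimed,
                         size_t max_reserve) {
      const int64_t c  = (int64_t)capacity.load();
      const int64_t u  = (int64_t)used.load();
      const int64_t cl = (int64_t)claimed.load();
      // Can transiently be negative when the reads interleave with updates
      const int64_t unused = c - u - cl - (int64_t)max_reserve;
      return unused > 0 ? (size_t)unused : 0;
    }
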
*** 298,343 ****
      // Update atomically since we have concurrent readers
      Atomic::store(&_current_max_capacity, _capacity);
    }
  }

! void ZPageAllocator::increase_used(size_t size, bool allocation, bool relocation) {
!   if (allocation) {
!     if (relocation) {
!       // Allocating a page for the purpose of relocation has a
!       // negative contribution to the number of reclaimed bytes.
!       _reclaimed -= size;
!     }
!     _allocated += size;
    }

    // Update atomically since we have concurrent readers
!   Atomic::add(&_used, size);
!
!   if (_used > _used_high) {
!     _used_high = _used;
    }
  }

! void ZPageAllocator::decrease_used(size_t size, bool free, bool reclaimed) {
!   if (free) {
!     // Only pages explicitly released with the reclaimed flag set
!     // counts as reclaimed bytes. This flag is true when we release
!     // a page after relocation, and is false when we release a page
!     // to undo an allocation.
!     if (reclaimed) {
!       _reclaimed += size;
!     } else {
!       _allocated -= size;
!     }
    }

    // Update atomically since we have concurrent readers
!   Atomic::sub(&_used, size);
!
!   if (_used < _used_low) {
!     _used_low = _used;
    }
  }

  bool ZPageAllocator::commit_page(ZPage* page) {
    // Commit physical memory
--- 300,339 ----
      // Update atomically since we have concurrent readers
      Atomic::store(&_current_max_capacity, _capacity);
    }
  }

! void ZPageAllocator::increase_used(size_t size, bool relocation) {
!   if (relocation) {
!     // Allocating a page for the purpose of relocation has a
!     // negative contribution to the number of reclaimed bytes.
!     _reclaimed -= size;
    }
+   _allocated += size;

    // Update atomically since we have concurrent readers
!   const size_t used = Atomic::add(&_used, size);
!   if (used > _used_high) {
!     _used_high = used;
    }
  }

! void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
!   // Only pages explicitly released with the reclaimed flag set
!   // counts as reclaimed bytes. This flag is true when we release
!   // a page after relocation, and is false when we release a page
!   // to undo an allocation.
!   if (reclaimed) {
!     _reclaimed += size;
!   } else {
!     _allocated -= size;
    }

    // Update atomically since we have concurrent readers
!   const size_t used = Atomic::sub(&_used, size);
!   if (used < _used_low) {
!     _used_low = used;
    }
  }

  bool ZPageAllocator::commit_page(ZPage* page) {
    // Commit physical memory
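
Beyond dropping the allocation/free flags, there is a subtle fix in this hunk: the watermarks are now taken from the value returned by Atomic::add/Atomic::sub rather than from a second, racy read of _used. A self-contained sketch of the pattern; note std::atomic's fetch_add returns the old value, so the sketch adds size back to mimic HotSpot's Atomic::add, which returns the new value:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> g_used{0};
    size_t g_used_high = 0; // watermark; relies on the caller's synchronization

    void increase_used_sketch(size_t size) {
      // Post-add value, computed from the RMW result rather than from a
      // re-read of g_used that a concurrent update could have changed.
      const size_t used = g_used.fetch_add(size) + size;
      if (used > g_used_high) {
        g_used_high = used;
      }
    }
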
*** 374,395 ****
    // Delete page safely
    _safe_delete(page);
  }

  bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
!   size_t available = _current_max_capacity - _used;

    if (no_reserve) {
      // The reserve should not be considered available
      available -= MIN2(available, _max_reserve);
    }

    return available >= size;
  }

  bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
!   size_t available = _capacity - _used;

    if (no_reserve) {
      // The reserve should not be considered available
      available -= MIN2(available, _max_reserve);
    } else if (_capacity != _current_max_capacity) {
--- 370,391 ----
    // Delete page safely
    _safe_delete(page);
  }

  bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
!   size_t available = _current_max_capacity - _used - _claimed;

    if (no_reserve) {
      // The reserve should not be considered available
      available -= MIN2(available, _max_reserve);
    }

    return available >= size;
  }

  bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
!   size_t available = _capacity - _used - _claimed;

    if (no_reserve) {
      // The reserve should not be considered available
      available -= MIN2(available, _max_reserve);
    } else if (_capacity != _current_max_capacity) {
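
For reference, the MIN2 clamp keeps the unsigned subtraction from wrapping when available is already below the reserve. A standalone version of the check, with std::min in place of MIN2 and the allocator fields passed as parameters so it compiles on its own:

    #include <algorithm>
    #include <cstddef>

    // Sketch of is_alloc_allowed() with the new _claimed term.
    bool is_alloc_allowed_sketch(size_t size, bool no_reserve,
                                 size_t current_max_capacity,
                                 size_t used, size_t claimed, size_t max_reserve) {
      size_t available = current_max_capacity - used - claimed;
      if (no_reserve) {
        // The reserve should not be considered available
        available -= std::min(available, max_reserve);
      }
      return available >= size;
    }
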
*** 444,458 ****
        return false;
      }
    }

    // Updated used statistics
!   increase_used(size, true /* allocation */, flags.relocation());

    // Send event
!   event.commit(type, size, flags.non_blocking(), flags.no_reserve(),
!                _used, _current_max_capacity - _used, _capacity - _used);

    // Success
    return true;
  }
--- 440,454 ----
        return false;
      }
    }

    // Updated used statistics
!   increase_used(size, flags.relocation());

    // Send event
!   event.commit(type, size, flags.non_blocking(), flags.no_reserve(), _used,
!                _current_max_capacity - _used - _claimed, _capacity - _used - _claimed);

    // Success
    return true;
  }
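
The event payload now subtracts _claimed from both headroom figures, so flushed-but-not-yet-uncommitted memory is not reported as free. A quick sanity check of the two expressions with invented numbers (units MB):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t current_max_capacity = 1024, capacity = 512;
      const size_t used = 300, claimed = 64;
      // Headroom against the growable maximum vs. the committed capacity
      assert(current_max_capacity - used - claimed == 660);
      assert(capacity - used - claimed == 148);
      return 0;
    }
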
*** 630,640 ****
    ZLocker<ZLock> locker(&_lock);

    // Adjust capacity and used to reflect the failed capacity increase
    const size_t remaining = allocation->size() - freed;
!   decrease_used(remaining, false /* free */, false /* reclaimed */);
    decrease_capacity(remaining, true /* set_max_capacity */);
  }

  ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  retry:
--- 626,636 ----
    ZLocker<ZLock> locker(&_lock);

    // Adjust capacity and used to reflect the failed capacity increase
    const size_t remaining = allocation->size() - freed;
!   decrease_used(remaining, false /* reclaimed */);
    decrease_capacity(remaining, true /* set_max_capacity */);
  }

  ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  retry:
*** 699,709 ****

  void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
    ZLocker<ZLock> locker(&_lock);

    // Update used statistics
!   decrease_used(page->size(), true /* free */, reclaimed);

    // Set time when last used
    page->set_last_used();

    // Cache page
--- 695,705 ----

  void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
    ZLocker<ZLock> locker(&_lock);

    // Update used statistics
!   decrease_used(page->size(), reclaimed);

    // Set time when last used
    page->set_last_used();

    // Cache page
*** 741,752 ****
      if (flushed == 0) {
        // Nothing flushed
        return 0;
      }

!     // Adjust used to reflect that these pages are no longer available
!     increase_used(flushed, false /* allocation */, false /* relocation */);
    }

    // Unmap, uncommit, and destroy flushed pages
    ZListRemoveIterator<ZPage> iter(&pages);
    for (ZPage* page; iter.next(&page);) {
--- 737,748 ----
      if (flushed == 0) {
        // Nothing flushed
        return 0;
      }

!     // Record flushed pages as claimed
!     Atomic::add(&_claimed, flushed);
    }

    // Unmap, uncommit, and destroy flushed pages
    ZListRemoveIterator<ZPage> iter(&pages);
    for (ZPage* page; iter.next(&page);) {
*** 757,768 ****

    {
      SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
      ZLocker<ZLock> locker(&_lock);

!     // Adjust used and capacity to reflect the uncommit
!     decrease_used(flushed, false /* free */, false /* reclaimed */);
      decrease_capacity(flushed, false /* set_max_capacity */);
    }

    return flushed;
  }
--- 753,764 ----

    {
      SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
      ZLocker<ZLock> locker(&_lock);

!     // Adjust claimed and capacity to reflect the uncommit
!     Atomic::sub(&_claimed, flushed);
      decrease_capacity(flushed, false /* set_max_capacity */);
    }

    return flushed;
  }
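
Taken together with the previous hunk, this gives _claimed a two-phase lifecycle: pages flushed from the cache are claimed under the lock, unmapped and uncommitted outside it, then unclaimed together with the capacity decrease. A standalone sketch of that flow; the function names are mine, not the patch's, and std::atomic again stands in for HotSpot's Atomic:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> g_claimed{0};
    std::atomic<size_t> g_capacity{0};

    // Phase 1, under the allocator lock: the flushed pages have left the
    // cache but are not uncommitted yet, so park them in claimed.
    void on_pages_flushed(size_t flushed) {
      g_claimed.fetch_add(flushed);
    }

    // Phase 2, lock re-taken after the unmap/uncommit work: the memory is
    // really gone, so drop it from claimed and capacity together.
    void on_uncommit_done(size_t flushed) {
      g_claimed.fetch_sub(flushed);
      g_capacity.fetch_sub(flushed);
    }
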