--- old/src/hotspot/share/gc/z/zPageAllocator.cpp 2020-06-01 07:12:29.578949085 +0200
+++ new/src/hotspot/share/gc/z/zPageAllocator.cpp 2020-06-01 07:12:29.255938675 +0200
@@ -128,6 +128,9 @@
     _reclaimed(0),
     _stalled(),
     _satisfied(),
+    _unmap_lock(),
+    _unmap_queue(),
+    _unmap_stop(false),
     _uncommit_lock(),
     _uncommit_enabled(false),
     _uncommit_stop(false),
@@ -362,12 +365,12 @@
 
 bool ZPageAllocator::map_page(const ZPage* page) const {
   // Map physical memory
-  return _physical.map(page->physical_memory(), page->start());
+  return _physical.map(page->start(), page->physical_memory());
 }
 
 void ZPageAllocator::unmap_page(const ZPage* page) const {
   // Unmap physical memory
-  _physical.unmap(page->physical_memory(), page->start());
+  _physical.unmap(page->start(), page->size());
 }
 
 void ZPageAllocator::destroy_page(ZPage* page) {
@@ -381,6 +384,29 @@
   _safe_delete(page);
 }
 
+void ZPageAllocator::enqueue_unmap_page(ZPage* page) {
+  ZLocker<ZConditionLock> locker(&_unmap_lock);
+  _unmap_queue.insert_last(page);
+  _unmap_lock.notify_all();
+}
+
+ZPage* ZPageAllocator::dequeue_unmap_page() {
+  ZLocker<ZConditionLock> locker(&_unmap_lock);
+
+  for (;;) {
+    if (_unmap_stop) {
+      return NULL;
+    }
+
+    ZPage* const page = _unmap_queue.remove_first();
+    if (page != NULL) {
+      return page;
+    }
+
+    _unmap_lock.wait();
+  }
+}
+
 bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
   size_t available = _current_max_capacity - _used - _claimed;
 
@@ -531,6 +557,8 @@
 
   // Allocate virtual memory. To make error handling a lot more straight
   // forward, we allocate virtual memory before destroying flushed pages.
+  // Flushed pages are also unmapped and destroyed asynchronously, so we
+  // can't immediately reuse that part of the address space anyway.
   const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
   if (vmem.is_null()) {
     log_error(gc)("Out of address space");
@@ -540,13 +568,12 @@
   ZPhysicalMemory pmem;
   size_t flushed = 0;
 
-  // Unmap, transfer physical memory, and destroy flushed pages
+  // Transfer physical memory, and enqueue pages for unmap and destroy
   ZListRemoveIterator<ZPage> iter(allocation->pages());
   for (ZPage* page; iter.next(&page);) {
     flushed += page->size();
-    unmap_page(page);
     pmem.transfer_segments(page->physical_memory());
-    destroy_page(page);
+    enqueue_unmap_page(page);
   }
 
   if (flushed > 0) {
@@ -719,6 +746,26 @@
   satisfy_stalled();
 }
 
+void ZPageAllocator::unmap_run() {
+  for (;;) {
+    ZPage* const page = dequeue_unmap_page();
+    if (page == NULL) {
+      // Stop
+      return;
+    }
+
+    // Unmap and destroy page
+    unmap_page(page);
+    destroy_page(page);
+  }
+}
+
+void ZPageAllocator::unmap_stop() {
+  ZLocker<ZConditionLock> locker(&_unmap_lock);
+  _unmap_stop = true;
+  _unmap_lock.notify_all();
+}
+
 size_t ZPageAllocator::uncommit(uint64_t* timeout) {
   // We need to join the suspendible thread set while manipulating capacity and
   // used, to make sure GC safepoints will have a consistent view. However, when
@@ -850,25 +897,30 @@
 
 void ZPageAllocator::debug_map_page(const ZPage* page) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  _physical.debug_map(page->physical_memory(), page->start());
+  _physical.debug_map(page->start(), page->physical_memory());
 }
 
 void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  _physical.debug_unmap(page->physical_memory(), page->start());
+  _physical.debug_unmap(page->start(), page->size());
 }
 
 void ZPageAllocator::pages_do(ZPageClosure* cl) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
-  ZListIterator<ZPageAllocation> iter(&_satisfied);
-  for (ZPageAllocation* allocation; iter.next(&allocation);) {
-    ZListIterator<ZPage> iter(allocation->pages());
-    for (ZPage* page; iter.next(&page);) {
+  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
+  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
+    ZListIterator<ZPage> iter_pages(allocation->pages());
+    for (ZPage* page; iter_pages.next(&page);) {
       cl->do_page(page);
     }
   }
 
+  ZListIterator<ZPage> iter_unmap_queue(&_unmap_queue);
+  for (ZPage* page; iter_unmap_queue.next(&page);) {
+    cl->do_page(page);
+  }
+
   _cache.pages_do(cl);
 }
 
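
Note on the pattern introduced above (illustration only, not part of the patch): enqueue_unmap_page(), dequeue_unmap_page(), unmap_run() and unmap_stop() together form a standard blocking producer/consumer queue with a shutdown flag. The sketch below shows the same protocol in isolation, using std::mutex, std::condition_variable and std::deque in place of ZConditionLock, ZLocker and ZList; every name in it (AsyncUnmapQueue, Page, and so on) is made up for the example and does not exist in HotSpot.

#include <condition_variable>
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>

struct Page {
  int id;
};

class AsyncUnmapQueue {
private:
  std::mutex              _lock;
  std::condition_variable _cv;
  std::deque<Page*>       _queue;
  bool                    _stop = false;

public:
  // Producer side: the allocation path hands over a flushed page instead of
  // unmapping and destroying it inline (cf. enqueue_unmap_page()).
  void enqueue(Page* page) {
    std::lock_guard<std::mutex> locker(_lock);
    _queue.push_back(page);
    _cv.notify_all();
  }

  // Consumer side: block until a page is available or stop() was called.
  // Returns nullptr to signal shutdown (cf. dequeue_unmap_page()).
  Page* dequeue() {
    std::unique_lock<std::mutex> locker(_lock);
    for (;;) {
      if (_stop) {
        return nullptr;
      }
      if (!_queue.empty()) {
        Page* const page = _queue.front();
        _queue.pop_front();
        return page;
      }
      _cv.wait(locker);
    }
  }

  // Wake the consumer and make it exit its loop (cf. unmap_stop()).
  void stop() {
    std::lock_guard<std::mutex> locker(_lock);
    _stop = true;
    _cv.notify_all();
  }
};

int main() {
  AsyncUnmapQueue queue;

  // Dedicated unmapper thread, analogous to the loop in unmap_run().
  std::thread unmapper([&queue]() {
    for (;;) {
      Page* const page = queue.dequeue();
      if (page == nullptr) {
        return;  // Stop
      }
      std::printf("unmap and destroy page %d\n", page->id);
      delete page;
    }
  });

  // Producer: enqueue a few pages, then shut the consumer down.
  // Note: as in the patch, a pending stop wins over draining the queue, so
  // pages still queued when stop() is observed are left unprocessed here.
  for (int i = 0; i < 4; i++) {
    queue.enqueue(new Page{i});
  }
  queue.stop();
  unmapper.join();
  return 0;
}

This also connects to the new comment in the @@ -531 hunk: handing unmapping off to another thread is compatible with allocating fresh virtual memory up front, because the flushed pages' part of the address space cannot be reused until the unmapper has processed them anyway.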