--- old/src/hotspot/share/gc/z/zCollectedHeap.cpp	2020-06-01 07:12:27.649886917 +0200
+++ new/src/hotspot/share/gc/z/zCollectedHeap.cpp	2020-06-01 07:12:27.380878247 +0200
@@ -25,6 +25,8 @@
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zCollectedHeap.hpp"
+#include "gc/z/zDirector.hpp"
+#include "gc/z/zDriver.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zNMethod.hpp"
@@ -32,6 +34,8 @@
 #include "gc/z/zOop.inline.hpp"
 #include "gc/z/zServiceability.hpp"
 #include "gc/z/zStat.hpp"
+#include "gc/z/zUncommitter.hpp"
+#include "gc/z/zUnmapper.hpp"
 #include "gc/z/zUtils.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/universe.hpp"
@@ -52,6 +56,7 @@
     _heap(),
     _director(new ZDirector()),
     _driver(new ZDriver()),
+    _unmapper(new ZUnmapper()),
     _uncommitter(new ZUncommitter()),
     _stat(new ZStat()),
     _runtime_workers() {}
@@ -81,6 +86,7 @@
 void ZCollectedHeap::stop() {
   _director->stop();
   _driver->stop();
+  _unmapper->stop();
   _uncommitter->stop();
   _stat->stop();
 }
@@ -278,6 +284,7 @@
 void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
   tc->do_thread(_director);
   tc->do_thread(_driver);
+  tc->do_thread(_unmapper);
   tc->do_thread(_uncommitter);
   tc->do_thread(_stat);
   _heap.worker_threads_do(tc);
@@ -330,6 +337,8 @@
   st->cr();
   _driver->print_on(st);
   st->cr();
+  _unmapper->print_on(st);
+  st->cr();
   _uncommitter->print_on(st);
   st->cr();
   _stat->print_on(st);
--- old/src/hotspot/share/gc/z/zCollectedHeap.hpp	2020-06-01 07:12:28.082900871 +0200
+++ new/src/hotspot/share/gc/z/zCollectedHeap.hpp	2020-06-01 07:12:27.814892234 +0200
@@ -27,13 +27,15 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/softRefPolicy.hpp"
 #include "gc/z/zBarrierSet.hpp"
-#include "gc/z/zDirector.hpp"
-#include "gc/z/zDriver.hpp"
 #include "gc/z/zHeap.hpp"
 #include "gc/z/zInitialize.hpp"
 #include "gc/z/zRuntimeWorkers.hpp"
-#include "gc/z/zStat.hpp"
-#include "gc/z/zUncommitter.hpp"
+
+class ZDirector;
+class ZDriver;
+class ZStat;
+class ZUnmapper;
+class ZUncommitter;
 
 class ZCollectedHeap : public CollectedHeap {
   friend class VMStructs;
@@ -45,6 +47,7 @@
   ZHeap _heap;
   ZDirector* _director;
   ZDriver* _driver;
+  ZUnmapper* _unmapper;
   ZUncommitter* _uncommitter;
   ZStat* _stat;
   ZRuntimeWorkers _runtime_workers;
--- old/src/hotspot/share/gc/z/zHeap.cpp	2020-06-01 07:12:28.572916663 +0200
+++ new/src/hotspot/share/gc/z/zHeap.cpp	2020-06-01 07:12:28.236905835 +0200
@@ -223,6 +223,14 @@
   _page_allocator.free_page(page, reclaimed);
 }
 
+void ZHeap::unmap_run() {
+  _page_allocator.unmap_run();
+}
+
+void ZHeap::unmap_stop() {
+  _page_allocator.unmap_stop();
+}
+
 void ZHeap::uncommit_run() {
   _page_allocator.uncommit_run();
 }
--- old/src/hotspot/share/gc/z/zHeap.hpp	2020-06-01 07:12:29.093933454 +0200
+++ new/src/hotspot/share/gc/z/zHeap.hpp	2020-06-01 07:12:28.747922303 +0200
@@ -113,6 +113,10 @@
   void undo_alloc_page(ZPage* page);
   void free_page(ZPage* page, bool reclaimed);
 
+  // Unmap memory
+  void unmap_run();
+  void unmap_stop();
+
   // Uncommit memory
   void uncommit_run();
   void uncommit_stop();
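The four files above wire the new ZUnmapper thread into ZCollectedHeap and route its entry points through ZHeap down to ZPageAllocator, which owns the actual work queue (next file). The hand-off is a classic condition-variable blocking queue: the allocator produces flushed pages, the ZUnmapper thread consumes them, and a stop flag lets shutdown wake a blocked consumer. Below is a minimal sketch of that protocol using standard C++ primitives in place of HotSpot's ZConditionLock/ZLocker/ZList (Page is a stand-in for ZPage; an illustration, not ZGC code):

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct Page; // stand-in for ZPage; defined in the driver further down

    class UnmapQueue {
    private:
      std::mutex              _lock;  // plays the role of ZConditionLock
      std::condition_variable _cv;
      std::deque<Page*>       _queue; // plays the role of ZList<ZPage>
      bool                    _stop = false;

    public:
      // Producer side (allocator path): publish a page, wake the consumer
      void enqueue(Page* page) {
        std::lock_guard<std::mutex> locker(_lock);
        _queue.push_back(page);
        _cv.notify_all();
      }

      // Consumer side (ZUnmapper thread): block until a page arrives or stop
      // is requested; both conditions are re-checked under the lock after
      // every wakeup, which also handles spurious wakeups
      Page* dequeue() {
        std::unique_lock<std::mutex> locker(_lock);
        for (;;) {
          if (_stop) {
            return nullptr; // nullptr means "shut down"
          }
          if (!_queue.empty()) {
            Page* const page = _queue.front();
            _queue.pop_front();
            return page;
          }
          _cv.wait(locker);
        }
      }

      // Shutdown: flip the flag under the lock, then wake any waiter
      void stop() {
        std::lock_guard<std::mutex> locker(_lock);
        _stop = true;
        _cv.notify_all();
      }
    };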
--- old/src/hotspot/share/gc/z/zPageAllocator.cpp	2020-06-01 07:12:29.578949085 +0200
+++ new/src/hotspot/share/gc/z/zPageAllocator.cpp	2020-06-01 07:12:29.255938675 +0200
@@ -128,6 +128,9 @@
     _reclaimed(0),
     _stalled(),
     _satisfied(),
+    _unmap_lock(),
+    _unmap_queue(),
+    _unmap_stop(false),
     _uncommit_lock(),
     _uncommit_enabled(false),
     _uncommit_stop(false),
@@ -362,12 +365,12 @@
 
 bool ZPageAllocator::map_page(const ZPage* page) const {
   // Map physical memory
-  return _physical.map(page->physical_memory(), page->start());
+  return _physical.map(page->start(), page->physical_memory());
 }
 
 void ZPageAllocator::unmap_page(const ZPage* page) const {
   // Unmap physical memory
-  _physical.unmap(page->physical_memory(), page->start());
+  _physical.unmap(page->start(), page->size());
 }
 
 void ZPageAllocator::destroy_page(ZPage* page) {
@@ -381,6 +384,29 @@
   _safe_delete(page);
 }
 
+void ZPageAllocator::enqueue_unmap_page(ZPage* page) {
+  ZLocker<ZConditionLock> locker(&_unmap_lock);
+  _unmap_queue.insert_last(page);
+  _unmap_lock.notify_all();
+}
+
+ZPage* ZPageAllocator::dequeue_unmap_page() {
+  ZLocker<ZConditionLock> locker(&_unmap_lock);
+
+  for (;;) {
+    if (_unmap_stop) {
+      return NULL;
+    }
+
+    ZPage* const page = _unmap_queue.remove_first();
+    if (page != NULL) {
+      return page;
+    }
+
+    _unmap_lock.wait();
+  }
+}
+
 bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
   size_t available = _current_max_capacity - _used - _claimed;
 
@@ -531,6 +557,8 @@
 
   // Allocate virtual memory. To make error handling a lot more straight
   // forward, we allocate virtual memory before destroying flushed pages.
+  // Flushed pages are also unmapped and destroyed asynchronously, so we
+  // can't immediately reuse that part of the address space anyway.
   const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
   if (vmem.is_null()) {
     log_error(gc)("Out of address space");
@@ -540,13 +568,12 @@
   ZPhysicalMemory pmem;
   size_t flushed = 0;
 
-  // Unmap, transfer physical memory, and destroy flushed pages
+  // Transfer physical memory, and enqueue pages for unmap and destroy
   ZListRemoveIterator<ZPage> iter(allocation->pages());
   for (ZPage* page; iter.next(&page);) {
     flushed += page->size();
-    unmap_page(page);
     pmem.transfer_segments(page->physical_memory());
-    destroy_page(page);
+    enqueue_unmap_page(page);
   }
 
   if (flushed > 0) {
@@ -719,6 +746,26 @@
   satisfy_stalled();
 }
 
+void ZPageAllocator::unmap_run() {
+  for (;;) {
+    ZPage* const page = dequeue_unmap_page();
+    if (page == NULL) {
+      // Stop
+      return;
+    }
+
+    // Unmap and destroy page
+    unmap_page(page);
+    destroy_page(page);
+  }
+}
+
+void ZPageAllocator::unmap_stop() {
+  ZLocker<ZConditionLock> locker(&_unmap_lock);
+  _unmap_stop = true;
+  _unmap_lock.notify_all();
+}
+
 size_t ZPageAllocator::uncommit(uint64_t* timeout) {
   // We need to join the suspendible thread set while manipulating capacity and
   // used, to make sure GC safepoints will have a consistent view. However, when
@@ -850,25 +897,30 @@
 
 void ZPageAllocator::debug_map_page(const ZPage* page) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  _physical.debug_map(page->physical_memory(), page->start());
+  _physical.debug_map(page->start(), page->physical_memory());
 }
 
 void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  _physical.debug_unmap(page->physical_memory(), page->start());
+  _physical.debug_unmap(page->start(), page->size());
 }
 
 void ZPageAllocator::pages_do(ZPageClosure* cl) const {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
-  ZListIterator<ZPageAllocation> iter(&_satisfied);
-  for (ZPageAllocation* allocation; iter.next(&allocation);) {
-    ZListIterator<ZPage> iter(allocation->pages());
-    for (ZPage* page; iter.next(&page);) {
+  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
+  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
+    ZListIterator<ZPage> iter_pages(allocation->pages());
+    for (ZPage* page; iter_pages.next(&page);) {
       cl->do_page(page);
     }
   }
 
+  ZListIterator<ZPage> iter_unmap_queue(&_unmap_queue);
+  for (ZPage* page; iter_unmap_queue.next(&page);) {
+    cl->do_page(page);
+  }
+
   _cache.pages_do(cl);
 }
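A hypothetical standalone driver for the sketch above, mirroring the lifecycle in this file: the consumer loop has the same shape as ZPageAllocator::unmap_run(), and stop() plays the role of unmap_stop(). Note that, exactly like dequeue_unmap_page() above, dequeue() checks the stop flag before the queue, so pages still queued at stop are abandoned — acceptable at VM shutdown, and preserved here for fidelity:

    #include <cstdio>
    #include <thread>

    struct Page { int id; }; // completes the forward declaration above

    int main() {
      UnmapQueue queue;

      // Consumer: same loop shape as ZPageAllocator::unmap_run()
      std::thread unmapper([&queue]() {
        for (;;) {
          Page* const page = queue.dequeue();
          if (page == nullptr) {
            return;          // stop requested
          }
          std::printf("unmap page %d\n", page->id);
          delete page;       // stands in for unmap_page() + destroy_page()
        }
      });

      // Producer: stands in for the allocator enqueueing flushed pages
      for (int i = 0; i < 4; i++) {
        queue.enqueue(new Page{i});
      }

      queue.stop();          // wakes the consumer if it is waiting
      unmapper.join();
      return 0;
    }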
--- old/src/hotspot/share/gc/z/zPageAllocator.hpp	2020-06-01 07:12:30.029963620 +0200
+++ new/src/hotspot/share/gc/z/zPageAllocator.hpp	2020-06-01 07:12:29.760954950 +0200
@@ -56,6 +56,9 @@
   ssize_t _reclaimed;
   ZList<ZPageAllocation> _stalled;
   ZList<ZPageAllocation> _satisfied;
+  ZConditionLock _unmap_lock;
+  ZList<ZPage> _unmap_queue;
+  bool _unmap_stop;
   ZConditionLock _uncommit_lock;
   bool _uncommit_enabled;
   bool _uncommit_stop;
@@ -78,6 +81,9 @@
 
   void destroy_page(ZPage* page);
 
+  void enqueue_unmap_page(ZPage* page);
+  ZPage* dequeue_unmap_page();
+
   bool is_alloc_allowed(size_t size, bool no_reserve) const;
   bool is_alloc_allowed_from_cache(size_t size, bool no_reserve) const;
 
@@ -119,6 +125,9 @@
   ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags);
   void free_page(ZPage* page, bool reclaimed);
 
+  void unmap_run();
+  void unmap_stop();
+
   void uncommit_run();
   void uncommit_stop();
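The zPhysicalMemory changes that follow are forced by the asynchronous hand-off: the allocation path above transfers the physical segments out of a flushed page (pmem.transfer_segments(...)) before enqueueing it, so by the time the ZUnmapper processes the page its ZPhysicalMemory is empty, and unmap_page() can only supply page->size(). The unmap-side entry points therefore switch from (pmem, addr) to (addr, size), while the map side keeps the pmem but takes the address first for symmetry. A compilable sketch of why size alone suffices — a mapped view lays the segments out back-to-back, so teardown is one contiguous range (Segment/PhysicalMemory/backing_* are hypothetical stand-ins, not ZGC APIs):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-ins for ZPhysicalMemory segments and the backing store
    struct Segment { uintptr_t start; size_t size; };
    using PhysicalMemory = std::vector<Segment>;

    static bool backing_map(uintptr_t addr, size_t size, uintptr_t paddr) {
      (void)addr; (void)size; (void)paddr;
      return true; // stand-in for mapping the backing store at paddr into [addr, addr+size)
    }

    static void backing_unmap(uintptr_t addr, size_t size) {
      (void)addr; (void)size; // stand-in for munmap-like teardown
    }

    // Mapping needs the segment layout, so it takes the full descriptor
    bool map_view(uintptr_t addr, const PhysicalMemory& pmem) {
      size_t size = 0;
      for (const Segment& seg : pmem) {
        if (!backing_map(addr + size, seg.size, seg.start)) {
          return false;
        }
        size += seg.size; // segments land back-to-back in the view
      }
      return true;
    }

    // Unmapping tears down one contiguous virtual range: (addr, size) is enough,
    // even after the page's physical segments have been transferred away
    void unmap_view(uintptr_t addr, size_t size) {
      backing_unmap(addr, size);
    }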
--- old/src/hotspot/share/gc/z/zPhysicalMemory.cpp	2020-06-01 07:12:30.461977542 +0200
+++ new/src/hotspot/share/gc/z/zPhysicalMemory.cpp	2020-06-01 07:12:30.191968841 +0200
@@ -323,17 +323,15 @@
   return true;
 }
 
-void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
   // From an NMT point of view we treat the first heap view (marked0) as committed
   const uintptr_t addr = ZAddress::marked0(offset);
-  const size_t size = pmem.size();
   MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
 }
 
-void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
   if (MemTracker::tracking_level() > NMT_minimal) {
     const uintptr_t addr = ZAddress::marked0(offset);
-    const size_t size = pmem.size();
     Tracker tracker(Tracker::uncommit);
     tracker.record((address)addr, size);
   }
@@ -407,7 +405,7 @@
   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 }
 
-bool ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+bool ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const {
   size_t size = 0;
 
   // Map segments
@@ -437,8 +435,8 @@
   return true;
 }
 
-void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
-  _backing.unmap(addr, pmem.size());
+void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
+  _backing.unmap(addr, size);
 }
 
 void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
@@ -453,58 +451,60 @@
   }
 }
 
-bool ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+bool ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
+  const size_t size = pmem.size();
+
   if (ZVerifyViews) {
     // Map good view
-    if (!map_view(pmem, ZAddress::good(offset))) {
+    if (!map_view(ZAddress::good(offset), pmem)) {
       fatal("Failed to map memory");
     }
   } else {
     // Map all views
-    if (!map_view(pmem, ZAddress::marked0(offset))) {
+    if (!map_view(ZAddress::marked0(offset), pmem)) {
       return false;
    }
-    if (!map_view(pmem, ZAddress::marked1(offset))) {
-      unmap_view(pmem, ZAddress::marked0(offset));
+    if (!map_view(ZAddress::marked1(offset), pmem)) {
+      unmap_view(ZAddress::marked0(offset), size);
      return false;
    }
-    if (!map_view(pmem, ZAddress::remapped(offset))) {
-      unmap_view(pmem, ZAddress::marked1(offset));
-      unmap_view(pmem, ZAddress::marked0(offset));
+    if (!map_view(ZAddress::remapped(offset), pmem)) {
+      unmap_view(ZAddress::marked1(offset), size);
+      unmap_view(ZAddress::marked0(offset), size);
       return false;
     }
   }
 
-  nmt_commit(pmem, offset);
+  nmt_commit(offset, size);
 
   // Success
   return true;
 }
 
-void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
-  nmt_uncommit(pmem, offset);
+void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
+  nmt_uncommit(offset, size);
 
   if (ZVerifyViews) {
     // Unmap good view
-    unmap_view(pmem, ZAddress::good(offset));
+    unmap_view(ZAddress::good(offset), size);
   } else {
     // Unmap all views
-    unmap_view(pmem, ZAddress::marked0(offset));
-    unmap_view(pmem, ZAddress::marked1(offset));
-    unmap_view(pmem, ZAddress::remapped(offset));
+    unmap_view(ZAddress::marked0(offset), size);
+    unmap_view(ZAddress::marked1(offset), size);
+    unmap_view(ZAddress::remapped(offset), size);
   }
 }
 
-void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
   // Map good view
   assert(ZVerifyViews, "Should be enabled");
-  if (!map_view(pmem, ZAddress::good(offset))) {
+  if (!map_view(ZAddress::good(offset), pmem)) {
     fatal("Failed to map memory");
   }
 }
 
-void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
   // Unmap good view
   assert(ZVerifyViews, "Should be enabled");
-  unmap_view(pmem, ZAddress::good(offset));
+  unmap_view(ZAddress::good(offset), size);
 }
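ZPhysicalMemoryManager::map() above establishes up to three heap views (marked0, marked1, remapped) and, on a mid-sequence failure, unwinds the views already mapped before returning false, so a failed map() leaves no mappings behind; NMT accounting deliberately records only the marked0 view as committed, to avoid counting the same physical memory three times. The same unwind idiom, generalized to N views (a hypothetical helper for illustration — the patch spells the three views out by hand, which is clearer for a fixed set):

    #include <cstddef>
    #include <cstdint>

    static bool map_one(uintptr_t addr)   { return addr != 0; } // stand-in for map_view(addr, pmem)
    static void unmap_one(uintptr_t addr) { (void)addr; }       // stand-in for unmap_view(addr, size)

    // Try to establish all views; on failure, unwind those already established
    // (in reverse order) so a failed map_all() has no side effects
    bool map_all(const uintptr_t* addrs, size_t count) {
      for (size_t i = 0; i < count; i++) {
        if (!map_one(addrs[i])) {
          for (size_t j = i; j > 0; j--) {
            unmap_one(addrs[j - 1]);
          }
          return false;
        }
      }
      return true;
    }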
--- old/src/hotspot/share/gc/z/zPhysicalMemory.hpp	2020-06-01 07:12:30.906991884 +0200
+++ new/src/hotspot/share/gc/z/zPhysicalMemory.hpp	2020-06-01 07:12:30.635983150 +0200
@@ -85,12 +85,12 @@
   ZPhysicalMemoryBacking _backing;
   ZMemoryManager _manager;
 
-  void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void nmt_commit(uintptr_t offset, size_t size) const;
+  void nmt_uncommit(uintptr_t offset, size_t size) const;
 
   void pretouch_view(uintptr_t addr, size_t size) const;
-  bool map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
-  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+  bool map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const;
+  void unmap_view(uintptr_t addr, size_t size) const;
 
 public:
   ZPhysicalMemoryManager(size_t max_capacity);
@@ -108,11 +108,11 @@
 
   void pretouch(uintptr_t offset, size_t size) const;
 
-  bool map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  bool map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
+  void unmap(uintptr_t offset, size_t size) const;
 
-  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
-  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const;
+  void debug_unmap(uintptr_t offset, size_t size) const;
 };
 
 #endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP
--- /dev/null	2020-05-08 10:28:18.126332474 +0200
+++ new/src/hotspot/share/gc/z/zUnmapper.cpp	2020-06-01 07:12:31.063996943 +0200
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zHeap.inline.hpp"
+#include "gc/z/zUnmapper.hpp"
+
+ZUnmapper::ZUnmapper() {
+  set_name("ZUnmapper");
+  create_and_start();
+}
+
+void ZUnmapper::run_service() {
+  ZHeap::heap()->unmap_run();
+}
+
+void ZUnmapper::stop_service() {
+  ZHeap::heap()->unmap_stop();
+}
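ZUnmapper itself is a thin ConcurrentGCThread: the constructor names the thread and calls create_and_start(), run_service() parks inside ZPageAllocator::unmap_run() until stop_service() sets the stop flag and notifies the condition lock. For readers unfamiliar with HotSpot's GC threads, here is the contract sketched with std::thread — a simplification under stated assumptions: the real ConcurrentGCThread also handles VM thread registration and a termination handshake, omitted here:

    #include <thread>

    class ServiceThread {
    private:
      std::thread _thread;

    protected:
      virtual void run_service()  = 0; // thread body; must return once stopped
      virtual void stop_service() = 0; // must wake run_service() so it can return

      // Called at the end of the most-derived constructor (as ZUnmapper does),
      // at which point virtual dispatch reaches the derived overrides
      void create_and_start() {
        _thread = std::thread([this]() { run_service(); });
      }

    public:
      virtual ~ServiceThread() = default;

      void stop() {
        stop_service();  // e.g. set a flag and notify, like unmap_stop()
        _thread.join();
      }
    };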
--- /dev/null	2020-05-08 10:28:18.126332474 +0200
+++ new/src/hotspot/share/gc/z/zUnmapper.hpp	2020-06-01 07:12:31.533012059 +0200
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZUNMAPPER_HPP
+#define SHARE_GC_Z_ZUNMAPPER_HPP
+
+#include "gc/shared/concurrentGCThread.hpp"
+
+class ZUnmapper : public ConcurrentGCThread {
+protected:
+  virtual void run_service();
+  virtual void stop_service();
+
+public:
+  ZUnmapper();
+};
+
+#endif // SHARE_GC_Z_ZUNMAPPER_HPP
--- /dev/null	2020-05-08 10:28:18.126332474 +0200
+++ new/test/hotspot/jtreg/gc/z/TestPageCacheFlush.java	2020-06-01 07:12:31.997027012 +0200
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package gc.z;
+
+/*
+ * @test TestPageCacheFlush
+ * @requires vm.gc.Z & !vm.graal.enabled
+ * @summary Test ZGC page cache flushing
+ * @library / /test/lib
+ * @run main/othervm gc.z.TestPageCacheFlush
+ */
+
+import java.util.LinkedList;
+import jdk.test.lib.process.ProcessTools;
+
+import static gc.testlibrary.Allocation.blackHole;
+
+public class TestPageCacheFlush {
+    static class Test {
+        private static final int K = 1024;
+        private static final int M = K * K;
+        private static volatile LinkedList<byte[]> keepAlive;
+
+        public static void fillPageCache(int size) {
+            System.out.println("Begin allocate (" + size + ")");
+
+            keepAlive = new LinkedList<>();
+
+            try {
+                for (;;) {
+                    keepAlive.add(new byte[size]);
+                }
+            } catch (OutOfMemoryError e) {
+                keepAlive = null;
+                System.gc();
+            }
+
+            System.out.println("End allocate (" + size + ")");
+        }
+
+        public static void main(String[] args) throws Exception {
+            // Allocate small objects to fill the page cache with small pages
+            fillPageCache(10 * K);
+
+            // Allocate large objects to provoke page cache flushing to rebuild
+            // cached small pages into large pages
+            fillPageCache(10 * M);
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        ProcessTools.executeProcess(ProcessTools.createJavaProcessBuilder(
+            "-XX:+UseZGC",
+            "-Xms128M",
+            "-Xmx128M",
+            "-Xlog:gc,gc+init,gc+heap=debug",
+            Test.class.getName()))
+                .outputTo(System.out)
+                .errorTo(System.out)
+                .shouldContain("Page Cache Flushed:")
+                .shouldHaveExitValue(0);
+    }
+}
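The test forks a JVM with a fixed 128M heap, first fills the page cache with small pages (10 KB arrays), drops them and triggers a GC, then allocates 10 MB arrays whose large pages can only be built by flushing the cached small pages — which now exercises the asynchronous unmap path. The "Page Cache Flushed:" log line is the observable evidence. To run the workload by hand outside jtreg, something like the following should work (an assumption: the nested workload class resolves to gc.z.TestPageCacheFlush$Test via Test.class.getName(); the flags are copied from the ProcessBuilder above, and the quotes guard the $ from the shell):

    java -XX:+UseZGC -Xms128M -Xmx128M -Xlog:gc,gc+init,gc+heap=debug 'gc.z.TestPageCacheFlush$Test'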