/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

enum ZPageAllocationStall {
  ZPageAllocationStallSuccess,
  ZPageAllocationStallFailed,
  ZPageAllocationStallStartGC
};

class ZPageAllocation : public StackObj {
  friend class ZList<ZPageAllocation>;

private:
  const uint8_t                 _type;
  const size_t                  _size;
  const ZAllocationFlags        _flags;
  const uint32_t                _seqnum;
  ZList<ZPage>                  _pages;
  ZListNode<ZPageAllocation>    _node;
  ZFuture<ZPageAllocationStall> _stall_result;

public:
  ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
      _type(type),
      _size(size),
      _flags(flags),
      _seqnum(ZGlobalSeqNum),
      _pages(),
      _node(),
      _stall_result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  uint32_t seqnum() const {
    return _seqnum;
  }

  ZPageAllocationStall wait() {
    return _stall_result.get();
  }

  ZList<ZPage>* pages() {
    return &_pages;
  }

  void satisfy(ZPageAllocationStall result) {
    _stall_result.set(result);
  }
};

ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _cache(),
    _virtual(max_capacity),
    _physical(max_capacity),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _claimed(0),
    _used(0),
    _used_high(0),
    _used_low(0),
    _allocated(0),
    _reclaimed(0),
    _stalled(),
    _satisfied(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  if (ZPageSizeMedium > 0) {
    log_info(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
  } else {
    log_info(gc, init)("Medium Page Size: N/A");
  }
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Check if uncommit should be enabled
  _uncommit = _physical.should_enable_uncommit(min_capacity, max_capacity);

  // Pre-map initial capacity
  if (!prime_cache(workers, initial_capacity)) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // Successfully initialized
  _initialized = true;
}
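// A minimal construction sketch, for orientation only (not part of the
// allocator itself; the real call site is ZHeap's constructor). The capacity
// arguments below are assumptions standing in for the values ZGC's heuristics
// derive from -Xms/-Xmx:
//
//   ZPageAllocator allocator(&workers,
//                            min_capacity,       // e.g. from MinHeapSize
//                            initial_capacity,   // e.g. from InitialHeapSize
//                            max_capacity,       // e.g. from MaxHeapSize
//                            max_reserve);       // sized by heuristics
//   if (!allocator.is_initialized()) {
//     // Virtual/physical backing failed, or priming the cache failed
//   }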
class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
  ZAllocationFlags flags;
  flags.set_non_blocking();
  flags.set_low_address();

  ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
  if (page == NULL) {
    return false;
  }

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_parallel(&task);
  }

  free_page(page, false /* reclaimed */);

  return true;
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
  const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
  return MIN2(soft_max_capacity, current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return Atomic::load(&_capacity);
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return Atomic::load(&_used);
}

size_t ZPageAllocator::unused() const {
  const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
  const ssize_t used = (ssize_t)Atomic::load(&_used);
  const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
  const ssize_t max_reserve = (ssize_t)_max_reserve;
  const ssize_t unused = capacity - used - claimed - max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}
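// Worked example for unused() above (illustrative numbers): with capacity
// 512M, used 256M, claimed 32M and a 32M max reserve, unused() returns
// 512M - 256M - 32M - 32M = 192M. The signed arithmetic is deliberate: the
// sum of used, claimed and reserve can transiently exceed capacity, in which
// case the result is clamped to 0 rather than underflowing.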
void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

size_t ZPageAllocator::increase_capacity(size_t size) {
  const size_t increase = MIN2(size, _current_max_capacity - _capacity);

  // Update atomically since we have concurrent readers
  Atomic::add(&_capacity, increase);

  return increase;
}

void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
  // Update atomically since we have concurrent readers
  Atomic::sub(&_capacity, size);

  if (set_max_capacity) {
    // Adjust current max capacity to avoid further attempts to increase capacity
    log_error(gc)("Forced to lower max Java heap size from "
                  SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                  _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                  _capacity / M, percent_of(_capacity, _max_capacity));

    // Update atomically since we have concurrent readers
    Atomic::store(&_current_max_capacity, _capacity);
  }
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::add(&_used, size);
  if (used > _used_high) {
    _used_high = used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  // Only pages explicitly released with the reclaimed flag set
  // count as reclaimed bytes. This flag is true when we release
  // a page after relocation, and is false when we release a page
  // to undo an allocation.
  if (reclaimed) {
    _reclaimed += size;
  } else {
    _allocated -= size;
  }

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::sub(&_used, size);
  if (used < _used_low) {
    _used_low = used;
  }
}

bool ZPageAllocator::commit_page(ZPage* page) {
  // Commit physical memory
  return _physical.commit(page->physical_memory());
}

void ZPageAllocator::uncommit_page(ZPage* page) {
  if (!_uncommit) {
    // Uncommit not supported/enabled
    return;
  }

  // Uncommit physical memory
  _physical.uncommit(page->physical_memory());
}

bool ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  return _physical.map(page->physical_memory(), page->start());
}

void ZPageAllocator::unmap_page(const ZPage* page) const {
  // Unmap physical memory
  _physical.unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::destroy_page(ZPage* page) {
  // Free virtual memory
  _virtual.free(page->virtual_memory());

  // Free physical memory
  _physical.free(page->physical_memory());

  // Delete page safely
  _safe_delete(page);
}

bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
  size_t available = _current_max_capacity - _used - _claimed;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available >= size;
}

bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
  size_t available = _capacity - _used - _claimed;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  } else if (_capacity != _current_max_capacity) {
    // Always increase capacity before using the reserve
    return false;
  }

  return available >= size;
}

bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
  if (!is_alloc_allowed(size, no_reserve)) {
    // Out of memory
    return false;
  }

  // Try allocate from the page cache
  if (is_alloc_allowed_from_cache(size, no_reserve)) {
    ZPage* const page = _cache.alloc_page(type, size);
    if (page != NULL) {
      // Success
      pages->insert_last(page);
      return true;
    }
  }

  // Try increase capacity
  const size_t increased = increase_capacity(size);
  if (increased < size) {
    // Could not increase capacity enough to satisfy the allocation
    // completely. Flush the page cache to satisfy the remainder.
    const size_t remaining = size - increased;
    _cache.flush_for_allocation(remaining, pages);
  }

  // Success
  return true;
}
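// Taken together, the two predicates and alloc_page_common_inner() above
// implement the following allocation order (summary comment only, behavior
// unchanged):
//
//   1. Refuse the request if used + claimed (plus the reserve, unless the
//      reserve may be consumed) would exceed the current max capacity.
//   2. Prefer recycling a page from the page cache, but never dip into the
//      reserve before capacity has been grown to the current max.
//   3. Otherwise grow capacity, and if it cannot grow enough, flush cached
//      pages to cover the remainder; the flushed pages end up on the
//      allocation's page list and are later harvested by alloc_page_create().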
bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
  EventZPageAllocation event;
  const uint8_t type = allocation->type();
  const size_t size = allocation->size();
  const ZAllocationFlags flags = allocation->flags();
  ZList<ZPage>* const pages = allocation->pages();

  // Try allocate without using the reserve
  if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
    // If allowed to, try allocate using the reserve
    if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
      // Out of memory
      return false;
    }
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send event
  event.commit(type, size, flags.non_blocking(), flags.no_reserve(),
               _used, _current_max_capacity - _used - _claimed,
               _capacity - _used - _claimed);

  // Success
  return true;
}

static void check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
  ZStatTimer timer(ZCriticalPhaseAllocationStall);
  EventZAllocationStall event;
  ZPageAllocationStall result;

  // We can only block if the VM is fully initialized
  check_out_of_memory_during_initialization();

  do {
    // Start asynchronous GC
    ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

    // Wait for allocation to complete, fail or request a GC
    result = allocation->wait();
  } while (result == ZPageAllocationStallStartGC);

  {
    //
    // We grab the lock here for two different reasons:
    //
    // 1) Guard deletion of underlying semaphore. This is a workaround for
    // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
    // the semaphore immediately after returning from sem_wait(). The
    // reason is that sem_post() can touch the semaphore after a waiting
    // thread has returned from sem_wait(). To avoid this race we are
    // forcing the waiting thread to acquire/release the lock held by the
    // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
    //
    // 2) Guard the list of satisfied pages.
    //
    ZLocker<ZLock> locker(&_lock);
    _satisfied.remove(allocation);
  }

  // Send event
  event.commit(allocation->type(), allocation->size());

  return (result == ZPageAllocationStallSuccess);
}

bool ZPageAllocator::alloc_page_stage0(ZPageAllocation* allocation) {
  {
    ZLocker<ZLock> locker(&_lock);

    if (alloc_page_common(allocation)) {
      // Success
      return true;
    }

    // Failed
    if (allocation->flags().non_blocking()) {
      // Don't stall
      return false;
    }

    // Enqueue allocation request
    _stalled.insert_last(allocation);
  }

  // Stall
  return alloc_page_stall(allocation);
}

ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
  const size_t size = allocation->size();

  // Allocate virtual memory. To make error handling a lot more
  // straightforward, we allocate virtual memory before destroying
  // flushed pages.
  const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
  if (vmem.is_null()) {
    log_error(gc)("Out of address space");
    return NULL;
  }

  ZPhysicalMemory pmem;
  size_t flushed = 0;

  // Unmap, transfer physical memory, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    flushed += page->size();
    unmap_page(page);
    pmem.transfer_segments(page->physical_memory());
    destroy_page(page);
  }

  if (flushed > 0) {
    // Update statistics
    ZStatInc(ZCounterPageCacheFlush, flushed);
    log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
  }

  // Allocate any remaining physical memory
  if (flushed < size) {
    const size_t remaining = size - flushed;
    _physical.alloc(pmem, remaining);
  }

  // Create new page
  return new ZPage(allocation->type(), vmem, pmem);
}
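// Illustrative walk-through of alloc_page_create() (hypothetical sizes): for
// an 8M request satisfied by two flushed 2M pages, the loop above unmaps both
// pages and transfers their 4M of physical segments into pmem before
// destroying the page objects. Since flushed (4M) is less than size (8M), the
// remaining 4M is then allocated directly from the physical memory manager,
// and a single new 8M page is built on top of the combined physical memory
// and the fresh virtual range.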
static bool is_alloc_satisfied(ZPageAllocation* allocation) {
  // The allocation is immediately satisfied if the list of pages contains
  // exactly one page, with the type and size that was requested.
  return allocation->pages()->size() == 1 &&
         allocation->pages()->first()->type() == allocation->type() &&
         allocation->pages()->first()->size() == allocation->size();
}

ZPage* ZPageAllocator::alloc_page_stage1(ZPageAllocation* allocation) {
  // Fast path
  if (is_alloc_satisfied(allocation)) {
    return allocation->pages()->remove_first();
  }

  // Slow path
  ZPage* const page = alloc_page_create(allocation);
  if (page == NULL) {
    // Out of address space
    return NULL;
  }

  // Commit page
  if (!commit_page(page)) {
    // Failed or partially failed. Split off any successfully committed
    // part of the page into a new page and insert it into the list of
    // pages, so that it will be re-inserted into the page cache.
    ZPage* const committed_page = page->split_committed();
    if (committed_page != NULL) {
      if (map_page(committed_page)) {
        // Success
        allocation->pages()->insert_last(committed_page);
      } else {
        // Failed
        uncommit_page(committed_page);
        destroy_page(committed_page);
      }
    }

    destroy_page(page);
    return NULL;
  }

  // Map page
  if (!map_page(page)) {
    // Failed
    uncommit_page(page);
    destroy_page(page);
    return NULL;
  }

  // Success
  return page;
}

void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
  size_t freed = 0;

  // Free any allocated pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    freed += page->size();
    free_page(page, false /* reclaimed */);
  }

  ZLocker<ZLock> locker(&_lock);

  // Adjust capacity and used to reflect the failed capacity increase
  const size_t remaining = allocation->size() - freed;
  decrease_used(remaining, false /* reclaimed */);
  decrease_capacity(remaining, true /* set_max_capacity */);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
retry:
  ZPageAllocation allocation(type, size, flags);

  // Allocate one or more pages from the page cache. If the allocation
  // succeeds but the returned pages don't cover the complete allocation,
  // then we are allowed to allocate the remaining memory directly from
  // the physical memory manager.
  if (!alloc_page_stage0(&allocation)) {
    // Out of memory
    return NULL;
  }

  ZPage* const page = alloc_page_stage1(&allocation);
  if (page == NULL) {
    // Failed to commit or map. Clean up and retry, in the hope that
    // we can still allocate by flushing the page cache (more aggressively).
    alloc_page_failed(&allocation);
    goto retry;
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}
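// A minimal caller sketch (illustrative only; real allocations reach this
// method via ZHeap). A Java thread allocating with default flags may stall
// inside alloc_page_stage0() until a GC cycle frees memory:
//
//   ZAllocationFlags flags;
//   ZPage* const page = alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
//   if (page == NULL) {
//     // Out of memory; the caller throws java.lang.OutOfMemoryError
//   }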
void ZPageAllocator::satisfy_stalled() {
  for (;;) {
    ZPageAllocation* const allocation = _stalled.first();
    if (allocation == NULL) {
      // Allocation queue is empty
      return;
    }

    if (!alloc_page_common(allocation)) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy allocation request.
    // Note that we must dequeue the allocation request first, since
    // it will immediately be deallocated once it has been satisfied.
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallSuccess);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try satisfy stalled allocations
  satisfy_stalled();
}

size_t ZPageAllocator::uncommit_inner(uint64_t delay, uint64_t* timeout) {
  // We need to join the suspendible thread set while manipulating capacity and
  // used, to make sure GC safepoints will have a consistent view. However, when
  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
  // we don't change the address good mask after pages have been flushed, and
  // thereby made invisible to pages_do(), but before they have been unmapped.
  SuspendibleThreadSetJoiner joiner(ZVerifyViews);

  ZList<ZPage> pages;
  size_t flushed;

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Never uncommit the reserve, and never uncommit below min capacity. We flush
    // out and uncommit chunks at a time (~0.8% of the max capacity, but at least
    // one granule and at most 256M), in case demand for memory increases while we
    // are uncommitting.
    const size_t retain = clamp(_used + _max_reserve, _min_capacity, _capacity);
    const size_t release = _capacity - retain;
    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
    const size_t flush = MIN2(release, limit);

    // Flush pages to uncommit
    flushed = _cache.flush_for_uncommit(flush, delay, timeout, &pages);
    if (flushed == 0) {
      // Nothing flushed
      return 0;
    }

    // Record flushed pages as claimed
    Atomic::add(&_claimed, flushed);
  }

  // Unmap, uncommit, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(&pages);
  for (ZPage* page; iter.next(&page);) {
    unmap_page(page);
    uncommit_page(page);
    destroy_page(page);
  }

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Adjust claimed and capacity to reflect the uncommit
    Atomic::sub(&_claimed, flushed);
    decrease_capacity(flushed, false /* set_max_capacity */);
  }

  return flushed;
}
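// Worked example of the sizing in uncommit_inner() (illustrative numbers):
// with an 8G current max capacity the per-iteration limit is
// MIN2(align_up(8G >> 7, ZGranuleSize), 256M) = 64M, i.e. ~0.8% of max
// capacity. If capacity is 4G, min capacity is 1G and used plus reserve is
// 2G, then retain = clamp(2G, 1G, 4G) = 2G, release = 4G - 2G = 2G, and at
// most 64M worth of pages is flushed and uncommitted in this pass.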
uint64_t ZPageAllocator::uncommit() {
  EventZUncommit event;
  const uint64_t delay = ZUncommitDelay;
  uint64_t timeout = delay;
  size_t uncommitted = 0;

  while (Atomic::load(&_uncommit)) {
    const size_t flushed = uncommit_inner(delay, &timeout);
    if (flushed == 0) {
      // Done
      break;
    }

    uncommitted += flushed;
  }

  if (uncommitted > 0) {
    // Send event
    event.commit(uncommitted);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
    log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)",
                       uncommitted / M, percent_of(uncommitted, _max_capacity));
  }

  log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);

  return timeout;
}

void ZPageAllocator::uncommit_cancel() {
  Atomic::store(&_uncommit, false);
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::pages_do(ZPageClosure* cl) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
  for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
    ZListIterator<ZPage> iter_pages(allocation->pages());
    for (ZPage* page; iter_pages.next(&page);) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_stalled.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
    if (allocation->seqnum() == ZGlobalSeqNum) {
      // Start a new GC cycle, keep allocation requests enqueued
      allocation->satisfy(ZPageAllocationStallStartGC);
      return;
    }

    // Out of memory, fail allocation request
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallFailed);
  }
}
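// Usage sketch for uncommit()/uncommit_cancel() (illustrative; upstream ZGC
// drives this from a dedicated uncommitter thread). The returned timeout says
// how long to sleep before the next attempt, and uncommit_cancel() makes
// subsequent calls return without flushing:
//
//   while (should_continue()) {          // hypothetical loop condition
//     const uint64_t timeout = allocator->uncommit();
//     sleep_for_seconds(timeout);        // hypothetical helper
//   }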