--- old/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp 2020-05-18 23:08:24.654505623 +0200 +++ new/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp 2020-05-18 23:08:24.400497384 +0200 @@ -92,11 +92,11 @@ return _initialized; } -void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const { +void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { // Does nothing } -bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) { +bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); assert(is_aligned(length, os::vm_page_size()), "Invalid length"); @@ -115,7 +115,7 @@ return true; } -size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const { // Try to commit the whole region if (commit_inner(offset, length)) { // Success @@ -143,7 +143,7 @@ } } -size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { assert(is_aligned(offset, os::vm_page_size()), "Invalid offset"); assert(is_aligned(length, os::vm_page_size()), "Invalid length"); @@ -161,11 +161,14 @@ return length; } -void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { +bool ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { const ZErrno err = mremap(_base + offset, addr, size); if (err) { - fatal("Failed to remap memory (%s)", err.to_string()); + log_error(gc)("Failed to map memory (%s)", err.to_string()); + return false; } + + return true; } void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { @@ -175,6 +178,6 @@ const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); if (res == MAP_FAILED) { ZErrno err; - fatal("Failed to map memory (%s)", err.to_string()); + log_error(gc)("Failed to map memory (%s)", err.to_string()); } } --- old/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp 2020-05-18 23:08:25.072519183 +0200 +++ new/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp 2020-05-18 23:08:24.813510781 +0200 @@ -29,19 +29,19 @@ uintptr_t _base; bool _initialized; - bool commit_inner(size_t offset, size_t length); + bool commit_inner(size_t offset, size_t length) const; public: ZPhysicalMemoryBacking(size_t max_capacity); bool is_initialized() const; - void warn_commit_limits(size_t max) const; + void warn_commit_limits(size_t max_capacity) const; - size_t commit(size_t offset, size_t length); - size_t uncommit(size_t offset, size_t length); + size_t commit(size_t offset, size_t length) const; + size_t uncommit(size_t offset, size_t length) const; - void map(uintptr_t addr, size_t size, uintptr_t offset) const; + bool map(uintptr_t addr, size_t size, uintptr_t offset) const; void unmap(uintptr_t addr, size_t size) const; }; --- old/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp 2020-05-18 23:08:25.558534948 +0200 +++ new/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp 2020-05-18 23:08:25.231524341 +0200 @@ -299,7 +299,7 @@ return _initialized; } -void ZPhysicalMemoryBacking::warn_available_space(size_t max) const { +void ZPhysicalMemoryBacking::warn_available_space(size_t max_capacity) const { // Note that the available space on a tmpfs or a hugetlbfs filesystem // will be zero if no size limit was specified when it was mounted. 
if (_available == 0) { @@ -313,18 +313,18 @@ // Warn if the filesystem doesn't currently have enough space available to hold // the max heap size. The max heap size will be capped if we later hit this limit // when trying to expand the heap. - if (_available < max) { + if (_available < max_capacity) { log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); log_warning(gc)("Not enough space available on the backing filesystem to hold the current max Java heap"); log_warning(gc)("size (" SIZE_FORMAT "M). Please adjust the size of the backing filesystem accordingly " - "(available", max / M); + "(available", max_capacity / M); log_warning(gc)("space is currently " SIZE_FORMAT "M). Continuing execution with the current filesystem " "size could", _available / M); - log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to map memory."); + log_warning(gc)("lead to a premature OutOfMemoryError being thrown, due to failure to commit memory."); } } -void ZPhysicalMemoryBacking::warn_max_map_count(size_t max) const { +void ZPhysicalMemoryBacking::warn_max_map_count(size_t max_capacity) const { const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT; FILE* const file = fopen(filename, "r"); if (file == NULL) { @@ -347,24 +347,24 @@ // However, ZGC tends to create the most mappings and dominate the total count. // In the worst cases, ZGC will map each granule three times, i.e. once per heap view. // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory. - const size_t required_max_map_count = (max / ZGranuleSize) * 3 * 1.2; + const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 3 * 1.2; if (actual_max_map_count < required_max_map_count) { log_warning(gc)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****"); log_warning(gc)("The system limit on number of memory mappings per process might be too low for the given"); log_warning(gc)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at", - max / M, filename); + max_capacity / M, filename); log_warning(gc)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution " "with the current", required_max_map_count, actual_max_map_count); - log_warning(gc)("limit could lead to a fatal error, due to failure to map memory."); + log_warning(gc)("limit could lead to a premature OutOfMemoryError being thrown, due to failure to map memory."); } } -void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const { +void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { // Warn if available space is too low - warn_available_space(max); + warn_available_space(max_capacity); // Warn if max map count is too low - warn_max_map_count(max); + warn_max_map_count(max_capacity); } bool ZPhysicalMemoryBacking::is_tmpfs() const { @@ -474,7 +474,7 @@ return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) const { // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs // since Linux 4.3. 
When fallocate(2) is not supported we emulate it using // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite @@ -488,7 +488,7 @@ } } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(size_t offset, size_t length) const { const int mode = 0; // Allocate const int res = ZSyscall::fallocate(_fd, mode, offset, length); if (res == -1) { @@ -500,7 +500,7 @@ return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) const { // Using compat mode is more efficient when allocating space on hugetlbfs. // Note that allocating huge pages this way will only reserve them, and not // associate them with segments of the file. We must guarantee that we at @@ -527,7 +527,7 @@ return fallocate_fill_hole_compat(offset, length); } -ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) { +ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) const { if (ZLargePages::is_explicit()) { // We can only punch hole in pages that have been touched. Non-touched // pages are only reserved, and not associated with any specific file @@ -550,7 +550,7 @@ return 0; } -ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) { +ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, size_t offset, size_t length) const { // Try first half const size_t offset0 = offset; const size_t length0 = align_up(length / 2, _block_size); @@ -571,7 +571,7 @@ return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) { +ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, size_t offset, size_t length) const { assert(is_aligned(offset, _block_size), "Invalid offset"); assert(is_aligned(length, _block_size), "Invalid length"); @@ -587,7 +587,7 @@ return err; } -bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) { +bool ZPhysicalMemoryBacking::commit_inner(size_t offset, size_t length) const { log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", offset / M, (offset + length) / M, length / M); @@ -624,7 +624,7 @@ return mapping->at((int)nindex); } -size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::commit_numa_interleaved(size_t offset, size_t length) const { size_t committed = 0; // Commit one granule at a time, so that each granule @@ -649,7 +649,7 @@ return committed; } -size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::commit_default(size_t offset, size_t length) const { // Try to commit the whole region if (commit_inner(offset, length)) { // Success @@ -677,7 +677,7 @@ } } -size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::commit(size_t offset, size_t length) const { if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) { // To get granule-level NUMA interleaving when using non-large pages, // we must explicitly interleave the memory at commit/fallocate time. 
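Note (illustrative, not part of the patch): the recurring change in the backing files above is that commit() reports how much was actually committed and map() now returns a bool with a log_error(gc) message instead of calling fatal(), so callers can back out of a partial failure. A minimal caller-side sketch of that pattern is below; the function name, the cleanup policy, and the direct use of the Linux backing header are assumptions for illustration only. The real caller-side handling introduced by this patch is ZPageAllocator::alloc_page_finish() further down.

  #include "gc/z/zPhysicalMemoryBacking_linux.hpp"  // assumed include path for the sketch

  // Sketch: commit a range and map it, undoing the commit on failure instead
  // of terminating the VM. Uses only the signatures shown in this patch:
  //   size_t commit(size_t offset, size_t length) const
  //   size_t uncommit(size_t offset, size_t length) const
  //   bool   map(uintptr_t addr, size_t size, uintptr_t offset) const
  static bool commit_and_map(const ZPhysicalMemoryBacking& backing,
                             uintptr_t addr, size_t offset, size_t size) {
    const size_t committed = backing.commit(offset, size);
    if (committed < size) {
      // Partial commit: release what was committed and report failure,
      // letting the page allocator retry or throw OutOfMemoryError.
      backing.uncommit(offset, committed);
      return false;
    }
    if (!backing.map(addr, size, offset)) {
      // Mapping failed: undo the commit so capacity accounting stays correct.
      backing.uncommit(offset, size);
      return false;
    }
    return true;
  }
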
@@ -687,7 +687,7 @@ return commit_default(offset, length); } -size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) { +size_t ZPhysicalMemoryBacking::uncommit(size_t offset, size_t length) const { log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)", offset / M, (offset + length) / M, length / M); @@ -700,12 +700,15 @@ return length; } -void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { +bool ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, uintptr_t offset) const { const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, offset); if (res == MAP_FAILED) { ZErrno err; - fatal("Failed to map memory (%s)", err.to_string()); + log_error(gc)("Failed to map memory (%s)", err.to_string()); + return false; } + + return true; } void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { @@ -715,6 +718,6 @@ const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); if (res == MAP_FAILED) { ZErrno err; - fatal("Failed to map memory (%s)", err.to_string()); + log_error(gc)("Failed to map memory (%s)", err.to_string()); } } --- old/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp 2020-05-18 23:08:26.007549514 +0200 +++ new/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp 2020-05-18 23:08:25.747541079 +0200 @@ -35,8 +35,8 @@ size_t _available; bool _initialized; - void warn_available_space(size_t max) const; - void warn_max_map_count(size_t max) const; + void warn_available_space(size_t max_capacity) const; + void warn_max_map_count(size_t max_capacity) const; int create_mem_fd(const char* name) const; int create_file_fd(const char* name) const; @@ -49,28 +49,28 @@ ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const; ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const; ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const; - ZErrno fallocate_fill_hole_compat(size_t offset, size_t length); - ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length); - ZErrno fallocate_fill_hole(size_t offset, size_t length); - ZErrno fallocate_punch_hole(size_t offset, size_t length); - ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length); - ZErrno fallocate(bool punch_hole, size_t offset, size_t length); - - bool commit_inner(size_t offset, size_t length); - size_t commit_numa_interleaved(size_t offset, size_t length); - size_t commit_default(size_t offset, size_t length); + ZErrno fallocate_fill_hole_compat(size_t offset, size_t length) const; + ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length) const; + ZErrno fallocate_fill_hole(size_t offset, size_t length) const; + ZErrno fallocate_punch_hole(size_t offset, size_t length) const; + ZErrno split_and_fallocate(bool punch_hole, size_t offset, size_t length) const; + ZErrno fallocate(bool punch_hole, size_t offset, size_t length) const; + + bool commit_inner(size_t offset, size_t length) const; + size_t commit_numa_interleaved(size_t offset, size_t length) const; + size_t commit_default(size_t offset, size_t length) const; public: ZPhysicalMemoryBacking(size_t max_capacity); bool is_initialized() const; - void warn_commit_limits(size_t max) const; + void warn_commit_limits(size_t max_capacity) const; - size_t commit(size_t offset, size_t length); - size_t uncommit(size_t offset, size_t length); + size_t commit(size_t offset, 
size_t length) const; + size_t uncommit(size_t offset, size_t length) const; - void map(uintptr_t addr, size_t size, uintptr_t offset) const; + bool map(uintptr_t addr, size_t size, uintptr_t offset) const; void unmap(uintptr_t addr, size_t size) const; }; --- old/src/hotspot/os/windows/gc/z/zMapper_windows.cpp 2020-05-18 23:08:26.422562976 +0200 +++ new/src/hotspot/os/windows/gc/z/zMapper_windows.cpp 2020-05-18 23:08:26.164554607 +0200 @@ -223,7 +223,7 @@ } } -void ZMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) { +bool ZMapper::map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size) { void* const res = ZSyscall::MapViewOfFile3( file_handle, // FileMapping GetCurrentProcess(), // ProcessHandle @@ -237,8 +237,11 @@ ); if (res == NULL) { - fatal_error("Failed to map memory", addr, size); + log_error(gc)("Failed to map memory (%d)", GetLastError()); + return false; } + + return true; } void ZMapper::unmap_view_preserve_placeholder(uintptr_t addr, size_t size) { @@ -249,6 +252,6 @@ ); if (!res) { - fatal_error("Failed to unmap memory", addr, size); + log_error(gc)("Failed to unmap memory (%d)", GetLastError()); } } --- old/src/hotspot/os/windows/gc/z/zMapper_windows.hpp 2020-05-18 23:08:26.844576665 +0200 +++ new/src/hotspot/os/windows/gc/z/zMapper_windows.hpp 2020-05-18 23:08:26.584568231 +0200 @@ -75,7 +75,7 @@ // Map a view of the file handle and replace the placeholder covering the // given virtual address range - static void map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size); + static bool map_view_replace_placeholder(HANDLE file_handle, uintptr_t file_offset, uintptr_t addr, size_t size); // Unmap the view and reinstate a placeholder covering the given virtual // address range --- old/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp 2020-05-18 23:08:27.258590095 +0200 +++ new/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp 2020-05-18 23:08:26.998581661 +0200 @@ -42,7 +42,7 @@ return true; } -void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const { +void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { // Does nothing } @@ -100,15 +100,23 @@ return uncommit_from_paging_file(offset, length); } -void ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const { +bool ZPhysicalMemoryBacking::map(uintptr_t addr, size_t size, size_t offset) const { assert(is_aligned(offset, ZGranuleSize), "Misaligned"); assert(is_aligned(addr, ZGranuleSize), "Misaligned"); assert(is_aligned(size, ZGranuleSize), "Misaligned"); for (size_t i = 0; i < size; i += ZGranuleSize) { HANDLE const handle = get_handle(offset + i); - ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize); + if (!ZMapper::map_view_replace_placeholder(handle, 0 /* offset */, addr + i, ZGranuleSize)) { + // Unmap any successfully mapped granules + if (i > 0) { + unmap(addr, i - ZGranuleSize); + } + return false; + } } + + return true; } void ZPhysicalMemoryBacking::unmap(uintptr_t addr, size_t size) const { --- old/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp 2020-05-18 23:08:27.742605796 +0200 +++ new/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp 2020-05-18 23:08:27.410595026 +0200 @@ -44,12 +44,12 @@ bool is_initialized() const; - void warn_commit_limits(size_t max) const; + void warn_commit_limits(size_t max_capacity) const; size_t 
commit(size_t offset, size_t length); size_t uncommit(size_t offset, size_t length); - void map(uintptr_t addr, size_t size, size_t offset) const; + bool map(uintptr_t addr, size_t size, size_t offset) const; void unmap(uintptr_t addr, size_t size) const; }; --- old/src/hotspot/share/gc/z/vmStructs_z.hpp 2020-05-18 23:08:28.150619031 +0200 +++ new/src/hotspot/share/gc/z/vmStructs_z.hpp 2020-05-18 23:08:27.892610662 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -77,8 +77,8 @@ volatile_nonstatic_field(ZPage, _top, uintptr_t) \ \ nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \ - nonstatic_field(ZPageAllocator, _capacity, size_t) \ - nonstatic_field(ZPageAllocator, _used, size_t) \ + volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \ + volatile_nonstatic_field(ZPageAllocator, _used, size_t) \ \ nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \ \ --- old/src/hotspot/share/gc/z/zAllocationFlags.hpp 2020-05-18 23:08:28.562632396 +0200 +++ new/src/hotspot/share/gc/z/zAllocationFlags.hpp 2020-05-18 23:08:28.306624092 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,18 +31,20 @@ // Allocation flags layout // ----------------------- // -// 7 3 2 1 0 -// +----+-+-+-+-+ -// |0000|1|1|1|1| -// +----+-+-+-+-+ -// | | | | | -// | | | | * 0-0 Worker Thread Flag (1-bit) -// | | | | -// | | | * 1-1 Non-Blocking Flag (1-bit) -// | | | -// | | * 2-2 Relocation Flag (1-bit) -// | | -// | * 3-3 No Reserve Flag (1-bit) +// 7 4 3 2 1 0 +// +---+-+-+-+-+-+ +// |000|1|1|1|1|1| +// +---+-+-+-+-+-+ +// | | | | | | +// | | | | | * 0-0 Worker Thread Flag (1-bit) +// | | | | | +// | | | | * 1-1 Non-Blocking Flag (1-bit) +// | | | | +// | | | * 2-2 Relocation Flag (1-bit) +// | | | +// | | * 3-3 No Reserve Flag (1-bit) +// | | +// | * 4-4 Low Address Flag (1-bit) // | // * 7-4 Unused (4-bits) // @@ -53,6 +55,7 @@ typedef ZBitField field_non_blocking; typedef ZBitField field_relocation; typedef ZBitField field_no_reserve; + typedef ZBitField field_low_address; uint8_t _flags; @@ -76,6 +79,10 @@ _flags |= field_no_reserve::encode(true); } + void set_low_address() { + _flags |= field_low_address::encode(true); + } + bool worker_thread() const { return field_worker_thread::decode(_flags); } @@ -91,6 +98,10 @@ bool no_reserve() const { return field_no_reserve::decode(_flags); } + + bool low_address() const { + return field_low_address::decode(_flags); + } }; #endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP --- old/src/hotspot/share/gc/z/zDirector.cpp 2020-05-18 23:08:29.048648162 +0200 +++ new/src/hotspot/share/gc/z/zDirector.cpp 2020-05-18 23:08:28.717637424 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -186,7 +186,7 @@ const size_t max_capacity = ZHeap::heap()->soft_max_capacity(); const size_t max_reserve = ZHeap::heap()->max_reserve(); const size_t used = ZHeap::heap()->used(); - const size_t free_with_reserve = max_capacity - used; + const size_t free_with_reserve = max_capacity - MIN2(max_capacity, used); const size_t free = free_with_reserve - MIN2(free_with_reserve, max_reserve); const double free_percent = percent_of(free, max_capacity); --- old/src/hotspot/share/gc/z/zFuture.hpp 2020-05-18 23:08:29.553664544 +0200 +++ new/src/hotspot/share/gc/z/zFuture.hpp 2020-05-18 23:08:29.219653709 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,6 @@ ZFuture(); void set(T value); - T peek(); T get(); }; --- old/src/hotspot/share/gc/z/zFuture.inline.hpp 2020-05-18 23:08:30.057680893 +0200 +++ new/src/hotspot/share/gc/z/zFuture.inline.hpp 2020-05-18 23:08:29.724670091 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,11 +42,6 @@ } template -inline T ZFuture::peek() { - return _value; -} - -template inline T ZFuture::get() { // Wait for notification Thread* const thread = Thread::current(); --- old/src/hotspot/share/gc/z/zHeap.cpp 2020-05-18 23:08:30.544696691 +0200 +++ new/src/hotspot/share/gc/z/zHeap.cpp 2020-05-18 23:08:30.218686116 +0200 @@ -76,11 +76,11 @@ } size_t ZHeap::heap_min_size() const { - return MinHeapSize; + return MAX2(MinHeapSize, heap_max_reserve_size()); } size_t ZHeap::heap_initial_size() const { - return InitialHeapSize; + return MAX2(InitialHeapSize, heap_max_reserve_size()); } size_t ZHeap::heap_max_size() const { @@ -241,8 +241,12 @@ _page_allocator.free_page(page, reclaimed); } -uint64_t ZHeap::uncommit(uint64_t delay) { - return _page_allocator.uncommit(delay); +uint64_t ZHeap::uncommit() { + return _page_allocator.uncommit(); +} + +void ZHeap::uncommit_cancel() { + return _page_allocator.uncommit_cancel(); } void ZHeap::flip_to_marked() { --- old/src/hotspot/share/gc/z/zHeap.hpp 2020-05-18 23:08:31.056713300 +0200 +++ new/src/hotspot/share/gc/z/zHeap.hpp 2020-05-18 23:08:30.724702530 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -119,7 +119,8 @@ void free_page(ZPage* page, bool reclaimed); // Uncommit memory - uint64_t uncommit(uint64_t delay); + uint64_t uncommit(); + void uncommit_cancel(); // Object allocation uintptr_t alloc_tlab(size_t size); --- old/src/hotspot/share/gc/z/zMemory.cpp 2020-05-18 23:08:31.546729195 +0200 +++ new/src/hotspot/share/gc/z/zMemory.cpp 2020-05-18 23:08:31.223718718 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ #include "precompiled.hpp" #include "gc/z/zList.inline.hpp" +#include "gc/z/zLock.inline.hpp" #include "gc/z/zMemory.inline.hpp" #include "memory/allocation.inline.hpp" @@ -86,6 +87,8 @@ } uintptr_t ZMemoryManager::alloc_from_front(size_t size) { + ZLocker locker(&_lock); + ZListIterator iter(&_freelist); for (ZMemory* area; iter.next(&area);) { if (area->size() >= size) { @@ -109,6 +112,8 @@ } uintptr_t ZMemoryManager::alloc_from_front_at_most(size_t size, size_t* allocated) { + ZLocker locker(&_lock); + ZMemory* area = _freelist.first(); if (area != NULL) { if (area->size() <= size) { @@ -133,6 +138,8 @@ } uintptr_t ZMemoryManager::alloc_from_back(size_t size) { + ZLocker locker(&_lock); + ZListReverseIterator iter(&_freelist); for (ZMemory* area; iter.next(&area);) { if (area->size() >= size) { @@ -155,6 +162,8 @@ } uintptr_t ZMemoryManager::alloc_from_back_at_most(size_t size, size_t* allocated) { + ZLocker locker(&_lock); + ZMemory* area = _freelist.last(); if (area != NULL) { if (area->size() <= size) { @@ -181,6 +190,8 @@ assert(start != UINTPTR_MAX, "Invalid address"); const uintptr_t end = start + size; + ZLocker locker(&_lock); + ZListIterator iter(&_freelist); for (ZMemory* area; iter.next(&area);) { if (start < area->start()) { --- old/src/hotspot/share/gc/z/zMemory.hpp 2020-05-18 23:08:31.959742593 +0200 +++ new/src/hotspot/share/gc/z/zMemory.hpp 2020-05-18 23:08:31.701734224 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #define SHARE_GC_Z_ZMEMORY_HPP #include "gc/z/zList.hpp" +#include "gc/z/zLock.hpp" #include "memory/allocation.hpp" class ZMemory : public CHeapObj { @@ -65,6 +66,7 @@ }; private: + ZLock _lock; ZList _freelist; Callbacks _callbacks; --- old/src/hotspot/share/gc/z/zPage.cpp 2020-05-18 23:08:32.366755796 +0200 +++ new/src/hotspot/share/gc/z/zPage.cpp 2020-05-18 23:08:32.107747394 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,7 @@ void ZPage::assert_initialized() const { assert(!_virtual.is_null(), "Should not be null"); assert(!_physical.is_null(), "Should not be null"); + assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch"); assert((_type == ZPageTypeSmall && size() == ZPageSizeSmall) || (_type == ZPageTypeMedium && size() == ZPageSizeMedium) || (_type == ZPageTypeLarge && is_aligned(size(), ZGranuleSize)), @@ -99,6 +100,25 @@ return page; } +ZPage* ZPage::split_committed() { + const ZPhysicalMemory pmem = _physical.split_committed(); + if (pmem.is_null()) { + // Nothing committed + return NULL; + } + + assert(!_physical.is_null(), "Should not be null"); + + // Resize this page + const ZVirtualMemory vmem = _virtual.split(pmem.size()); + _type = type_from_size(_virtual.size()); + _top = start(); + _livemap.resize(object_max_count()); + + // Create new page + return new ZPage(vmem, pmem); +} + void ZPage::print_on(outputStream* out) const { out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s%s", type_to_string(), start(), top(), end(), --- old/src/hotspot/share/gc/z/zPage.hpp 2020-05-18 23:08:32.786769420 +0200 +++ new/src/hotspot/share/gc/z/zPage.hpp 2020-05-18 23:08:32.525760954 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,17 +69,15 @@ uintptr_t top() const; size_t remaining() const; - const ZPhysicalMemory& physical_memory() const; const ZVirtualMemory& virtual_memory() const; + const ZPhysicalMemory& physical_memory() const; + ZPhysicalMemory& physical_memory(); uint8_t numa_id(); bool is_allocating() const; bool is_relocatable() const; - bool is_mapped() const; - void set_pre_mapped(); - uint64_t last_used() const; void set_last_used(); @@ -88,6 +86,7 @@ ZPage* retype(uint8_t type); ZPage* split(size_t size); ZPage* split(uint8_t type, size_t size); + ZPage* split_committed(); bool is_in(uintptr_t addr) const; --- old/src/hotspot/share/gc/z/zPage.inline.hpp 2020-05-18 23:08:33.246784343 +0200 +++ new/src/hotspot/share/gc/z/zPage.inline.hpp 2020-05-18 23:08:32.939774384 +0200 @@ -126,12 +126,16 @@ return end() - top(); } +inline const ZVirtualMemory& ZPage::virtual_memory() const { + return _virtual; +} + inline const ZPhysicalMemory& ZPage::physical_memory() const { return _physical; } -inline const ZVirtualMemory& ZPage::virtual_memory() const { - return _virtual; +inline ZPhysicalMemory& ZPage::physical_memory() { + return _physical; } inline uint8_t ZPage::numa_id() { @@ -150,17 +154,6 @@ return _seqnum < ZGlobalSeqNum; } -inline bool ZPage::is_mapped() const { - return _seqnum > 0; -} - -inline void ZPage::set_pre_mapped() { - // The _seqnum variable is also used to signal that the virtual and physical - // memory has been mapped. So, we need to set it to non-zero when the memory - // has been pre-mapped. 
- _seqnum = 1; -} - inline uint64_t ZPage::last_used() const { return _last_used; } --- old/src/hotspot/share/gc/z/zPageAllocator.cpp 2020-05-18 23:08:33.668798032 +0200 +++ new/src/hotspot/share/gc/z/zPageAllocator.cpp 2020-05-18 23:08:33.407789565 +0200 @@ -30,42 +30,52 @@ #include "gc/z/zLock.inline.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageAllocator.hpp" -#include "gc/z/zPageCache.inline.hpp" +#include "gc/z/zPageCache.hpp" #include "gc/z/zSafeDelete.inline.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zTask.hpp" #include "gc/z/zTracer.inline.hpp" #include "gc/z/zWorkers.hpp" #include "jfr/jfrEvents.hpp" +#include "logging/log.hpp" #include "runtime/globals.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" #include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" static const ZStatCounter ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond); static const ZStatCounter ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond); static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond); static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall"); -class ZPageAllocRequest : public StackObj { - friend class ZList; +enum ZPageAllocationStall { + ZPageAllocationStallSuccess, + ZPageAllocationStallFailed, + ZPageAllocationStallStartGC +}; + +class ZPageAllocation : public StackObj { + friend class ZList; private: - const uint8_t _type; - const size_t _size; - const ZAllocationFlags _flags; - const unsigned int _total_collections; - ZListNode _node; - ZFuture _result; + const uint8_t _type; + const size_t _size; + const ZAllocationFlags _flags; + const unsigned int _total_collections; + ZList _pages; + ZListNode _node; + ZFuture _stall_result; public: - ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) : + ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) : _type(type), _size(size), _flags(flags), - _total_collections(total_collections), + _total_collections(is_init_completed() ? 
ZCollectedHeap::heap()->total_collections() : 0), + _pages(), _node(), - _result() {} + _stall_result() {} uint8_t type() const { return _type; @@ -83,41 +93,39 @@ return _total_collections; } - ZPage* peek() { - return _result.peek(); + ZPageAllocationStall wait() { + return _stall_result.get(); } - ZPage* wait() { - return _result.get(); + ZList* pages() { + return &_pages; } - void satisfy(ZPage* page) { - _result.set(page); + void satisfy(ZPageAllocationStall result) { + _stall_result.set(result); } }; -ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1; - ZPageAllocator::ZPageAllocator(ZWorkers* workers, size_t min_capacity, size_t initial_capacity, size_t max_capacity, size_t max_reserve) : _lock(), + _cache(), _virtual(max_capacity), _physical(max_capacity), - _cache(), _min_capacity(min_capacity), _max_capacity(max_capacity), _max_reserve(max_reserve), _current_max_capacity(max_capacity), _capacity(0), + _used(0), _used_high(0), _used_low(0), - _used(0), _allocated(0), _reclaimed(0), - _queue(), + _stalled(), _satisfied(), _safe_delete(), _uncommit(false), @@ -136,26 +144,15 @@ // Warn if system limits could stop us from reaching max capacity _physical.warn_commit_limits(max_capacity); - // Commit initial capacity - _capacity = _physical.commit(initial_capacity); - if (_capacity != initial_capacity) { + // Check if uncommit should be enabled + _uncommit = _physical.should_enable_uncommit(min_capacity, max_capacity); + + // Pre-map initial capacity + if (!prime_cache(workers, initial_capacity)) { log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M); return; } - // If uncommit is not explicitly disabled, max capacity is greater than - // min capacity, and uncommit is supported by the platform, then we will - // try to uncommit unused memory. 
- _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit(); - if (_uncommit) { - log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay); - } else { - log_info(gc, init)("Uncommit: Disabled"); - } - - // Pre-map initial capacity - prime_cache(workers, initial_capacity); - // Successfully initialized _initialized = true; } @@ -189,21 +186,16 @@ } }; -void ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) { - // Allocate physical memory - const ZPhysicalMemory pmem = _physical.alloc(size); - guarantee(!pmem.is_null(), "Invalid size"); - - // Allocate virtual memory - const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */); - guarantee(!vmem.is_null(), "Invalid size"); +bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) { + ZAllocationFlags flags; - // Allocate page - ZPage* const page = new ZPage(vmem, pmem); + flags.set_non_blocking(); + flags.set_low_address(); - // Map page - map_page(page); - page->set_pre_mapped(); + ZPage* const page = alloc_page(ZPageTypeLarge, size, flags); + if (page == NULL) { + return false; + } if (AlwaysPreTouch) { // Pre-touch page @@ -211,9 +203,9 @@ workers->run_parallel(&task); } - // Add page to cache - page->set_last_used(); - _cache.free_page(page); + free_page(page, false /* reclaimed */); + + return true; } bool ZPageAllocator::is_initialized() const { @@ -230,11 +222,13 @@ size_t ZPageAllocator::soft_max_capacity() const { // Note that SoftMaxHeapSize is a manageable flag - return MIN2(SoftMaxHeapSize, _current_max_capacity); + const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize); + const size_t current_max_capacity = Atomic::load(&_current_max_capacity); + return MIN2(soft_max_capacity, current_max_capacity); } size_t ZPageAllocator::capacity() const { - return _capacity; + return Atomic::load(&_capacity); } size_t ZPageAllocator::max_reserve() const { @@ -250,11 +244,14 @@ } size_t ZPageAllocator::used() const { - return _used; + return Atomic::load(&_used); } size_t ZPageAllocator::unused() const { - const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve; + const ssize_t capacity = (ssize_t)Atomic::load(&_capacity); + const ssize_t used = (ssize_t)Atomic::load(&_used); + const ssize_t max_reserve = (ssize_t)_max_reserve; + const ssize_t unused = capacity - used - max_reserve; return unused > 0 ? (size_t)unused : 0; } @@ -273,74 +270,104 @@ _used_high = _used_low = _used; } -void ZPageAllocator::increase_used(size_t size, bool relocation) { - if (relocation) { - // Allocating a page for the purpose of relocation has a - // negative contribution to the number of reclaimed bytes. 
- _reclaimed -= size; +size_t ZPageAllocator::increase_capacity(size_t size) { + const size_t increase = MIN2(size, _current_max_capacity - _capacity); + + // Update atomically since we have concurrent readers + Atomic::add(&_capacity, increase); + + return increase; +} + +void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) { + // Update atomically since we have concurrent readers + Atomic::sub(&_capacity, size); + + if (set_max_capacity) { + // Adjust current max capacity to avoid further attempts to increase capacity + log_error(gc)("Forced to lower max Java heap size from " + SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)", + _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity), + _capacity / M, percent_of(_capacity, _max_capacity)); + + // Update atomically since we have concurrent readers + Atomic::store(&_current_max_capacity, _capacity); + } +} + +void ZPageAllocator::increase_used(size_t size, bool allocation, bool relocation) { + if (allocation) { + if (relocation) { + // Allocating a page for the purpose of relocation has a + // negative contribution to the number of reclaimed bytes. + _reclaimed -= size; + } + _allocated += size; } - _allocated += size; - _used += size; + + // Update atomically since we have concurrent readers + Atomic::add(&_used, size); + if (_used > _used_high) { _used_high = _used; } } -void ZPageAllocator::decrease_used(size_t size, bool reclaimed) { - // Only pages explicitly released with the reclaimed flag set - // counts as reclaimed bytes. This flag is true when we release - // a page after relocation, and is false when we release a page - // to undo an allocation. - if (reclaimed) { - _reclaimed += size; - } else { - _allocated -= size; +void ZPageAllocator::decrease_used(size_t size, bool free, bool reclaimed) { + if (free) { + // Only pages explicitly released with the reclaimed flag set + // counts as reclaimed bytes. This flag is true when we release + // a page after relocation, and is false when we release a page + // to undo an allocation. 
+ if (reclaimed) { + _reclaimed += size; + } else { + _allocated -= size; + } } - _used -= size; + + // Update atomically since we have concurrent readers + Atomic::sub(&_used, size); + if (_used < _used_low) { _used_low = _used; } } -ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) { - // Allocate virtual memory - const ZVirtualMemory vmem = _virtual.alloc(size); - if (vmem.is_null()) { - // Out of address space - return NULL; +bool ZPageAllocator::commit_page(ZPage* page) { + // Commit physical memory + return _physical.commit(page->physical_memory()); +} + +void ZPageAllocator::uncommit_page(ZPage* page) { + // Uncommit physical memory, if uncommit is supported/enabled + if (_uncommit) { + _physical.uncommit(page->physical_memory()); } +} - // Allocate physical memory - const ZPhysicalMemory pmem = _physical.alloc(size); - assert(!pmem.is_null(), "Invalid size"); +bool ZPageAllocator::map_page(const ZPage* page) const { + // Map physical memory + return _physical.map(page->physical_memory(), page->start()); +} - // Allocate page - return new ZPage(type, vmem, pmem); +void ZPageAllocator::unmap_page(const ZPage* page) const { + // Unmap physical memory + _physical.unmap(page->physical_memory(), page->start()); } void ZPageAllocator::destroy_page(ZPage* page) { - const ZVirtualMemory& vmem = page->virtual_memory(); - const ZPhysicalMemory& pmem = page->physical_memory(); - - // Unmap memory - _physical.unmap(pmem, vmem.start()); + // Free virtual memory + _virtual.free(page->virtual_memory()); // Free physical memory - _physical.free(pmem); - - // Free virtual memory - _virtual.free(vmem); + _physical.free(page->physical_memory()); // Delete page safely _safe_delete(page); } -void ZPageAllocator::map_page(const ZPage* page) const { - // Map physical memory - _physical.map(page->physical_memory(), page->start()); -} - -size_t ZPageAllocator::max_available(bool no_reserve) const { +bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const { size_t available = _current_max_capacity - _used; if (no_reserve) { @@ -348,179 +375,277 @@ available -= MIN2(available, _max_reserve); } - return available; + return available >= size; } -bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) { - if (max_available(no_reserve) < size) { - // Not enough free memory +bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const { + size_t available = _capacity - _used; + + if (no_reserve) { + // The reserve should not be considered available + available -= MIN2(available, _max_reserve); + } else if (_capacity != _current_max_capacity) { + // Always increase capacity before using the reserve return false; } - // We add the max_reserve to the requested size to avoid losing - // the reserve because of failure to increase capacity before - // reaching max capacity. - size += _max_reserve; - - // Don't try to increase capacity if enough unused capacity - // is available or if current max capacity has been reached. - const size_t available = _capacity - _used; - if (available < size && _capacity < _current_max_capacity) { - // Try to increase capacity - const size_t commit = MIN2(size - available, _current_max_capacity - _capacity); - const size_t committed = _physical.commit(commit); - _capacity += committed; - - log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, " - "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, " - "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M", - size / M, no_reserve ? 
"True" : "False", available / M, - commit / M, committed / M, _capacity / M); + return available >= size; +} - if (committed != commit) { - // Failed, or partly failed, to increase capacity. Adjust current - // max capacity to avoid further attempts to increase capacity. - log_error(gc)("Forced to lower max Java heap size from " - SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)", - _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity), - _capacity / M, percent_of(_capacity, _max_capacity)); +bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList* pages) { + if (!is_alloc_allowed(size, no_reserve)) { + // Out of memory + return false; + } - _current_max_capacity = _capacity; + // Try allocate from the page cache + if (is_alloc_allowed_from_cache(size, no_reserve)) { + ZPage* const page = _cache.alloc_page(type, size); + if (page != NULL) { + // Success + pages->insert_last(page); + return true; } } - if (!no_reserve) { - size -= _max_reserve; + // Try increase capacity + const size_t increased = increase_capacity(size); + if (increased < size) { + // Could not increase capacity enough to satisfy the allocation + // completely. Flush the page cache to satisfy the remainder. + const size_t remaining = size - increased; + _cache.flush_for_allocation(remaining, pages); } - const size_t new_available = _capacity - _used; - return new_available >= size; + // Success + return true; } -void ZPageAllocator::ensure_uncached_available(size_t size) { - assert(_capacity - _used >= size, "Invalid size"); - const size_t uncached_available = _capacity - _used - _cache.available(); - if (size > uncached_available) { - flush_cache_for_allocation(size - uncached_available); +bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) { + EventZPageAllocation event; + const uint8_t type = allocation->type(); + const size_t size = allocation->size(); + const ZAllocationFlags flags = allocation->flags(); + ZList* const pages = allocation->pages(); + + // Try allocate without using the reserve + if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) { + // If allowed to, try allocate using the reserve + if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) { + // Out of memory + return false; + } } + + // Updated used statistics + increase_used(size, true /* allocation */, flags.relocation()); + + // Send event + event.commit(type, size, flags.non_blocking(), flags.no_reserve(), + _used, _current_max_capacity - _used, _capacity - _used); + + // Success + return true; } -ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) { - if (!ensure_available(size, no_reserve)) { - // Not enough free memory - return NULL; +static void check_out_of_memory_during_initialization() { + if (!is_init_completed()) { + vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small"); } +} + +bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) { + ZStatTimer timer(ZCriticalPhaseAllocationStall); + EventZAllocationStall event; + ZPageAllocationStall result; + + // We can only block if the VM is fully initialized + check_out_of_memory_during_initialization(); + + do { + // Start asynchronous GC + ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall); + + // Wait for allocation to complete, fail or request a GC + result = allocation->wait(); + } while (result == ZPageAllocationStallStartGC); - // Try allocate page from the cache - 
ZPage* const page = _cache.alloc_page(type, size); - if (page != NULL) { - return page; + { + // + // We grab the lock here for two different reasons: + // + // 1) Guard deletion of underlying semaphore. This is a workaround for + // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy + // the semaphore immediately after returning from sem_wait(). The + // reason is that sem_post() can touch the semaphore after a waiting + // thread have returned from sem_wait(). To avoid this race we are + // forcing the waiting thread to acquire/release the lock held by the + // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674 + // + // 2) Guard the list of satisfied pages. + // + ZLocker locker(&_lock); + _satisfied.remove(allocation); } - // Try flush pages from the cache - ensure_uncached_available(size); + // Send event + event.commit(allocation->type(), allocation->size()); - // Create new page - return create_page(type, size); + return (result == ZPageAllocationStallSuccess); } -ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) { - EventZPageAllocation event; +bool ZPageAllocator::alloc_page_prepare(ZPageAllocation* allocation) { + { + ZLocker locker(&_lock); - ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve()); - if (page == NULL) { - // Out of memory + if (alloc_page_common(allocation)) { + // Success + return true; + } + + // Failed + if (allocation->flags().non_blocking()) { + // Don't stall + return false; + } + + // Enqueue allocation request + _stalled.insert_last(allocation); + } + + // Stall + return alloc_page_stall(allocation); +} + +ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) { + const size_t size = allocation->size(); + + // Allocate virtual memory + const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address()); + if (vmem.is_null()) { + log_error(gc)("Out of address space"); return NULL; } - // Update used statistics - increase_used(size, flags.relocation()); + ZPhysicalMemory pmem; + size_t flushed = 0; - // Send trace event - event.commit(type, size, _used, max_available(flags.no_reserve()), - _cache.available(), flags.non_blocking(), flags.no_reserve()); + // Unmap, transfer physical memory, and destroy flushed pages + ZListRemoveIterator iter(allocation->pages()); + for (ZPage* page; iter.next(&page);) { + flushed += page->size(); + unmap_page(page); + pmem.transfer_segments(page->physical_memory()); + destroy_page(page); + } - return page; -} + if (flushed > 0) { + // Update statistics + ZStatInc(ZCounterPageCacheFlush, flushed); + log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M); + } -void ZPageAllocator::check_out_of_memory_during_initialization() { - if (!is_init_completed()) { - vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small"); + // Allocate any remaining physical memory + if (flushed < size) { + const size_t remaining = size - flushed; + _physical.alloc(pmem, remaining); } + + // Create new page + return new ZPage(allocation->type(), vmem, pmem); } -ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) { - // Prepare to block - ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections()); +static bool is_alloc_satisfied(ZPageAllocation* allocation) { + // The allocation is immediately satisfied if the list of pages contains + // exactly one page, with the type and size that was requested. 
+ return allocation->pages()->size() == 1 && + allocation->pages()->first()->type() == allocation->type() && + allocation->pages()->first()->size() == allocation->size(); +} - _lock.lock(); +ZPage* ZPageAllocator::alloc_page_finish(ZPageAllocation* allocation) { + // Fast path + if (is_alloc_satisfied(allocation)) { + return allocation->pages()->remove_first(); + } - // Try non-blocking allocation - ZPage* page = alloc_page_common(type, size, flags); + // Slow path + ZPage* const page = alloc_page_create(allocation); if (page == NULL) { - // Allocation failed, enqueue request - _queue.insert_last(&request); + // Out of address space + return NULL; } - _lock.unlock(); - - if (page == NULL) { - // Allocation failed - ZStatTimer timer(ZCriticalPhaseAllocationStall); - EventZAllocationStall event; - - // We can only block if VM is fully initialized - check_out_of_memory_during_initialization(); - - do { - // Start asynchronous GC - ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall); - - // Wait for allocation to complete or fail - page = request.wait(); - } while (page == gc_marker); - - { - // - // We grab the lock here for two different reasons: - // - // 1) Guard deletion of underlying semaphore. This is a workaround for - // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy - // the semaphore immediately after returning from sem_wait(). The - // reason is that sem_post() can touch the semaphore after a waiting - // thread have returned from sem_wait(). To avoid this race we are - // forcing the waiting thread to acquire/release the lock held by the - // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674 - // - // 2) Guard the list of satisfied pages. - // - ZLocker locker(&_lock); - _satisfied.remove(&request); + // Commit page + if (!commit_page(page)) { + // Failed or partially failed. Split of any successfully committed + // part of the page into a new page and insert it into list of pages, + // so that it will be re-inserted into the page cache. + ZPage* const committed_page = page->split_committed(); + if (committed_page != NULL) { + if (map_page(committed_page)) { + // Success + allocation->pages()->insert_last(committed_page); + } else { + // Failed + uncommit_page(committed_page); + destroy_page(committed_page); + } } - event.commit(type, size); + destroy_page(page); + return NULL; + } + + // Map page + if (!map_page(page)) { + // Failed + uncommit_page(page); + destroy_page(page); + return NULL; } + // Success return page; } -ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) { +void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) { + size_t freed = 0; + + // Free any allocated pages + ZListRemoveIterator iter(allocation->pages()); + for (ZPage* page; iter.next(&page);) { + freed += page->size(); + free_page(page, false /* reclaimed */); + } + ZLocker locker(&_lock); - return alloc_page_common(type, size, flags); + + // Adjust capacity and used to reflect the failed capacity increase + const size_t remaining = allocation->size() - freed; + decrease_used(remaining, false /* free */, false /* reclaimed */); + decrease_capacity(remaining, true /* set_max_capacity */); } ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) { - ZPage* const page = flags.non_blocking() - ? 
alloc_page_nonblocking(type, size, flags) - : alloc_page_blocking(type, size, flags); - if (page == NULL) { +retry: + ZPageAllocation allocation(type, size, flags); + + // Allocate one or more pages from the page cache. If the allocation + // succeeds but the returned pages don't cover the complete allocation, + // then we are allowed to allocate the remaining memory directly from + // the physical memory manager. + if (!alloc_page_prepare(&allocation)) { // Out of memory return NULL; } - // Map page if needed - if (!page->is_mapped()) { - map_page(page); + ZPage* const page = alloc_page_finish(&allocation); + if (page == NULL) { + // Failed to commit or map. Clean up and retry, in the hope that + // we can still allocate by flushing the page cache (more agressively). + alloc_page_failed(&allocation); + goto retry; } // Reset page. This updates the page's sequence number and must @@ -541,26 +666,25 @@ return page; } -void ZPageAllocator::satisfy_alloc_queue() { +void ZPageAllocator::satisfy_stalled() { for (;;) { - ZPageAllocRequest* const request = _queue.first(); - if (request == NULL) { + ZPageAllocation* const allocation = _stalled.first(); + if (allocation == NULL) { // Allocation queue is empty return; } - ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags()); - if (page == NULL) { + if (!alloc_page_common(allocation)) { // Allocation could not be satisfied, give up return; } - // Allocation succeeded, dequeue and satisfy request. Note that - // the dequeue operation must happen first, since the request - // will immediately be deallocated once it has been satisfied. - _queue.remove(request); - _satisfied.insert_first(request); - request->satisfy(page); + // Allocation succeeded, dequeue and satisfy allocation request. + // Note that we must dequeue the allocation request first, since + // it will immediately be deallocated once it has been satisfied. + _stalled.remove(allocation); + _satisfied.insert_last(allocation); + allocation->satisfy(ZPageAllocationStallSuccess); } } @@ -568,7 +692,7 @@ ZLocker locker(&_lock); // Update used statistics - decrease_used(page->size(), reclaimed); + decrease_used(page->size(), true /* free */, reclaimed); // Set time when last used page->set_last_used(); @@ -576,172 +700,99 @@ // Cache page _cache.free_page(page); - // Try satisfy blocked allocations - satisfy_alloc_queue(); + // Try satisfy stalled allocations + satisfy_stalled(); } -size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation) { - EventZPageCacheFlush event; +size_t ZPageAllocator::uncommit_inner(uint64_t delay, uint64_t* timeout) { + // We need to join the suspendible thread set while manipulating capacity and + // used, to make sure GC safepoints will have a consistent view. However, when + // ZVerifyViews is enabled we need to join at a broader scope to also make sure + // we don't change the address good mask after pages have been flushed, and + // thereby made invisible to pages_do(), but before they have been unmapped. + SuspendibleThreadSetJoiner joiner(ZVerifyViews); + ZList pages; + size_t flushed; - ZList list; + { + SuspendibleThreadSetJoiner joiner(!ZVerifyViews); + ZLocker locker(&_lock); - // Flush pages - _cache.flush(cl, &list); + // Never uncommit the reserve, and never uncommit below min capacity. We flush + // out and uncommit chunks at a time (~0.8% of the max capacity, but at least + // one granule and at most 256M), in case demand for memory increases while we + // are uncommitting. 
+ const size_t retain = clamp(_used + _max_reserve, _min_capacity, _current_max_capacity); + const size_t release = _capacity - retain; + const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M); + const size_t flush = MIN2(release, limit); + + // Flush pages to uncommit + flushed = _cache.flush_for_uncommit(flush, delay, timeout, &pages); + if (flushed == 0) { + // Nothing flushed + return 0; + } - const size_t overflushed = cl->overflushed(); - if (overflushed > 0) { - // Overflushed, keep part of last page - ZPage* const page = list.last()->split(overflushed); - _cache.free_page(page); + // Adjust used to reflect that these pages are no longer available + increase_used(flushed, false /* allocation */, false /* relocation */); } - // Destroy pages - size_t flushed = 0; - for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) { - flushed += page->size(); + // Unmap, uncommit, and destroy flushed pages + ZListRemoveIterator<ZPage> iter(&pages); + for (ZPage* page; iter.next(&page);) { + unmap_page(page); + uncommit_page(page); destroy_page(page); } - // Send event - event.commit(flushed, for_allocation); - - return flushed; -} - -class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure { -public: - ZPageCacheFlushForAllocationClosure(size_t requested) : - ZPageCacheFlushClosure(requested) {} - - virtual bool do_page(const ZPage* page) { - if (_flushed < _requested) { - // Flush page - _flushed += page->size(); - return true; - } + { + SuspendibleThreadSetJoiner joiner(!ZVerifyViews); + ZLocker<ZLock> locker(&_lock); - // Don't flush page - return false; + // Adjust used and capacity to reflect the uncommit + decrease_used(flushed, false /* free */, false /* reclaimed */); + decrease_capacity(flushed, false /* set_max_capacity */); } -}; - -void ZPageAllocator::flush_cache_for_allocation(size_t requested) { - assert(requested <= _cache.available(), "Invalid request"); - // Flush pages - ZPageCacheFlushForAllocationClosure cl(requested); - const size_t flushed = flush_cache(&cl, true /* for_allocation */); - - assert(requested == flushed, "Failed to flush"); - - const size_t cached_after = _cache.available(); - const size_t cached_before = cached_after + flushed; - - log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), " - "Flushed: " SIZE_FORMAT "M", - cached_before / M, percent_of(cached_before, max_capacity()), - cached_after / M, percent_of(cached_after, max_capacity()), - flushed / M); - - // Update statistics - ZStatInc(ZCounterPageCacheFlush, flushed); + return flushed; } -class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure { -private: - const uint64_t _now; - const uint64_t _delay; - uint64_t _timeout; - -public: - ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) : - ZPageCacheFlushClosure(requested), - _now(os::elapsedTime()), - _delay(delay), - _timeout(_delay) {} - - virtual bool do_page(const ZPage* page) { - const uint64_t expires = page->last_used() + _delay; - const uint64_t timeout = expires - MIN2(expires, _now); - - if (_flushed < _requested && timeout == 0) { - // Flush page - _flushed += page->size(); - return true; - } - - // Record shortest non-expired timeout - _timeout = MIN2(_timeout, timeout); - - // Don't flush page - return false; - } - - uint64_t timeout() const { - return _timeout; - } -}; - -uint64_t ZPageAllocator::uncommit(uint64_t delay) { - // Set the default timeout, when no pages are found in the - // cache or when uncommit is
disabled, equal to the delay. - uint64_t timeout = delay; - - if (!_uncommit) { - // Disabled - return timeout; - } - +uint64_t ZPageAllocator::uncommit() { EventZUncommit event; - size_t capacity_before; - size_t capacity_after; - size_t uncommitted; - - { - SuspendibleThreadSetJoiner joiner; - ZLocker<ZLock> locker(&_lock); + const uint64_t delay = ZUncommitDelay; + uint64_t timeout = delay; + size_t uncommitted = 0; - // Don't flush more than we will uncommit. Never uncommit - // the reserve, and never uncommit below min capacity. - const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity); - const size_t guarded = MAX2(needed, _min_capacity); - const size_t uncommittable = _capacity - guarded; - const size_t uncached_available = _capacity - _used - _cache.available(); - size_t uncommit = MIN2(uncommittable, uncached_available); - const size_t flush = uncommittable - uncommit; - - if (flush > 0) { - // Flush pages to uncommit - ZPageCacheFlushForUncommitClosure cl(flush, delay); - uncommit += flush_cache(&cl, false /* for_allocation */); - timeout = cl.timeout(); - } - - // Uncommit - uncommitted = _physical.uncommit(uncommit); - _capacity -= uncommitted; + while (Atomic::load(&_uncommit)) { + const size_t flushed = uncommit_inner(delay, &timeout); + if (flushed == 0) { + // Done + break; + } - capacity_after = _capacity; - capacity_before = capacity_after + uncommitted; + uncommitted += flushed; } if (uncommitted > 0) { - log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), " - "Uncommitted: " SIZE_FORMAT "M", - capacity_before / M, percent_of(capacity_before, max_capacity()), - capacity_after / M, percent_of(capacity_after, max_capacity()), - uncommitted / M); - // Send event - event.commit(capacity_before, capacity_after, uncommitted); + event.commit(uncommitted); // Update statistics ZStatInc(ZCounterUncommit, uncommitted); + log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)", + uncommitted / M, percent_of(uncommitted, _max_capacity)); } + log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout); + return timeout; } +void ZPageAllocator::uncommit_cancel() { + Atomic::store(&_uncommit, false); +} + void ZPageAllocator::enable_deferred_delete() const { _safe_delete.enable_deferred_delete(); } @@ -761,10 +812,12 @@ } void ZPageAllocator::pages_do(ZPageClosure* cl) const { - ZListIterator<ZPageAllocRequest> iter(&_satisfied); - for (ZPageAllocRequest* request; iter.next(&request);) { - const ZPage* const page = request->peek(); - if (page != NULL) { + assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); + + ZListIterator<ZPageAllocation> iter(&_satisfied); + for (ZPageAllocation* allocation; iter.next(&allocation);) { + ZListIterator<ZPage> iter(allocation->pages()); + for (ZPage* page; iter.next(&page);) { cl->do_page(page); } } @@ -774,7 +827,7 @@ bool ZPageAllocator::is_alloc_stalled() const { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - return !_queue.is_empty(); + return !_stalled.is_empty(); } void ZPageAllocator::check_out_of_memory() { @@ -782,16 +835,16 @@ // Fail allocation requests that were enqueued before the // last GC cycle started, otherwise start a new GC cycle.
- for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) { - if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) { + for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) { + if (allocation->total_collections() == ZCollectedHeap::heap()->total_collections()) { // Start a new GC cycle, keep allocation requests enqueued - request->satisfy(gc_marker); + allocation->satisfy(ZPageAllocationStallStartGC); return; } // Out of memory, fail allocation request - _queue.remove(request); - _satisfied.insert_first(request); - request->satisfy(NULL); + _stalled.remove(allocation); + _satisfied.insert_last(allocation); + allocation->satisfy(ZPageAllocationStallFailed); } } --- old/src/hotspot/share/gc/z/zPageAllocator.hpp 2020-05-18 23:08:34.140813343 +0200 +++ new/src/hotspot/share/gc/z/zPageAllocator.hpp 2020-05-18 23:08:33.881804942 +0200 @@ -33,7 +33,7 @@ #include "gc/z/zVirtualMemory.hpp" #include "memory/allocation.hpp" -class ZPageAllocRequest; +class ZPageAllocation; class ZWorkers; class ZPageAllocator { @@ -41,50 +41,55 @@ private: ZLock _lock; + ZPageCache _cache; ZVirtualMemoryManager _virtual; ZPhysicalMemoryManager _physical; - ZPageCache _cache; const size_t _min_capacity; const size_t _max_capacity; const size_t _max_reserve; - size_t _current_max_capacity; - size_t _capacity; + volatile size_t _current_max_capacity; + volatile size_t _capacity; + volatile size_t _used; size_t _used_high; size_t _used_low; - size_t _used; size_t _allocated; ssize_t _reclaimed; - ZList<ZPageAllocRequest> _queue; - ZList<ZPageAllocRequest> _satisfied; + ZList<ZPageAllocation> _stalled; + ZList<ZPageAllocation> _satisfied; mutable ZSafeDelete<ZPage> _safe_delete; - bool _uncommit; + volatile bool _uncommit; bool _initialized; - static ZPage* const gc_marker; + bool prime_cache(ZWorkers* workers, size_t size); - void prime_cache(ZWorkers* workers, size_t size); + size_t increase_capacity(size_t size); + void decrease_capacity(size_t size, bool set_max_capacity); - void increase_used(size_t size, bool relocation); - void decrease_used(size_t size, bool reclaimed); + void increase_used(size_t size, bool allocation, bool relocation); + void decrease_used(size_t size, bool free, bool reclaimed); - ZPage* create_page(uint8_t type, size_t size); - void destroy_page(ZPage* page); + bool commit_page(ZPage* page); + void uncommit_page(ZPage* page); - size_t max_available(bool no_reserve) const; - bool ensure_available(size_t size, bool no_reserve); - void ensure_uncached_available(size_t size); + bool map_page(const ZPage* page) const; + void unmap_page(const ZPage* page) const; - void check_out_of_memory_during_initialization(); + void destroy_page(ZPage* page); + + bool is_alloc_allowed(size_t size, bool no_reserve) const; + bool is_alloc_allowed_from_cache(size_t size, bool no_reserve) const; - ZPage* alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve); - ZPage* alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags); - ZPage* alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags); - ZPage* alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags); + bool alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages); + bool alloc_page_common(ZPageAllocation* allocation); + bool alloc_page_stall(ZPageAllocation* allocation); + bool alloc_page_prepare(ZPageAllocation* allocation); + ZPage* alloc_page_create(ZPageAllocation* allocation); + ZPage* alloc_page_finish(ZPageAllocation* allocation);
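Note on the sizing arithmetic in uncommit_inner() above: each pass flushes at most MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M), i.e. roughly 1/128th of the current max capacity, and never touches the reserve or goes below the minimum capacity. The standalone sketch below replays that arithmetic with assumed example values (heap sizes, reserve and granule size are invented for illustration and are not taken from this change):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t granule_size = 2 * M;            // assumed ZGranuleSize
      const size_t used = 96 * M;                   // assumed current usage
      const size_t max_reserve = 32 * M;            // assumed reserve
      const size_t min_capacity = 128 * M;          // assumed -Xms
      const size_t current_max_capacity = 512 * M;  // assumed -Xmx
      const size_t capacity = 512 * M;              // assumed committed capacity

      // Same shape as uncommit_inner(): retain at least used + reserve and the
      // min capacity, then cap each pass at ~1/128th of max capacity, granule aligned.
      const size_t retain = std::clamp(used + max_reserve, min_capacity, current_max_capacity);
      const size_t release = capacity - retain;
      const size_t step = (current_max_capacity >> 7) + granule_size - 1;
      const size_t limit = std::min(step / granule_size * granule_size, 256 * M);
      const size_t flush = std::min(release, limit);

      printf("%zuM per pass\n", flush / M);  // prints "4M per pass" with these values
      return 0;
    }

With these inputs 384M is releasable but only 4M is flushed per pass, which is why ZPageAllocator::uncommit() above keeps looping until uncommit_inner() reports that nothing more could be flushed.
+ void 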
alloc_page_failed(ZPageAllocation* allocation); - size_t flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation); - void flush_cache_for_allocation(size_t requested); + void satisfy_stalled(); - void satisfy_alloc_queue(); + size_t uncommit_inner(uint64_t delay, uint64_t* timeout); public: ZPageAllocator(ZWorkers* workers, @@ -112,13 +117,12 @@ ZPage* alloc_page(uint8_t type, size_t size, ZAllocationFlags flags); void free_page(ZPage* page, bool reclaimed); - uint64_t uncommit(uint64_t delay); + uint64_t uncommit(); + void uncommit_cancel(); void enable_deferred_delete() const; void disable_deferred_delete() const; - void map_page(const ZPage* page) const; - void debug_map_page(const ZPage* page) const; void debug_unmap_page(const ZPage* page) const; --- old/src/hotspot/share/gc/z/zPageCache.cpp 2020-05-18 23:08:34.568827227 +0200 +++ new/src/hotspot/share/gc/z/zPageCache.cpp 2020-05-18 23:08:34.304818663 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -22,29 +22,39 @@ */ #include "precompiled.hpp" +#include "gc/z/zGlobals.hpp" #include "gc/z/zList.inline.hpp" #include "gc/z/zNUMA.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageCache.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zValue.inline.hpp" +#include "jfr/jfrEvents.hpp" #include "logging/log.hpp" +#include "memory/allocation.hpp" static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond); static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond); static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond); static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond); +class ZPageCacheFlushClosure : public StackObj { + friend class ZPageCache; + +protected: + const size_t _requested; + size_t _flushed; + +public: + ZPageCacheFlushClosure(size_t requested); + virtual bool do_page(const ZPage* page) = 0; +}; + ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) : _requested(requested), _flushed(0) {} -size_t ZPageCacheFlushClosure::overflushed() const { - return _flushed > _requested ? 
_flushed - _requested : 0; -} - ZPageCache::ZPageCache() : - _available(0), _small(), _medium(), _large() {} @@ -161,7 +171,7 @@ page = oversized->split(type, size); // Cache remainder - free_page_inner(oversized); + free_page(oversized); } else { // Re-type correctly sized page page = oversized->retype(type); @@ -169,16 +179,14 @@ } } - if (page != NULL) { - _available -= page->size(); - } else { + if (page == NULL) { ZStatInc(ZCounterPageCacheMiss); } return page; } -void ZPageCache::free_page_inner(ZPage* page) { +void ZPageCache::free_page(ZPage* page) { const uint8_t type = page->type(); if (type == ZPageTypeSmall) { _small.get(page->numa_id()).insert_first(page); @@ -189,11 +197,6 @@ } } -void ZPageCache::free_page(ZPage* page) { - free_page_inner(page); - _available += page->size(); -} - bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) { ZPage* const page = from->last(); if (page == NULL || !cl->do_page(page)) { @@ -202,7 +205,6 @@ } // Flush page - _available -= page->size(); from->remove(page); to->insert_last(page); return true; @@ -239,6 +241,90 @@ flush_list(cl, &_large, to); flush_list(cl, &_medium, to); flush_per_numa_lists(cl, &_small, to); + + if (cl->_flushed > cl->_requested) { + // Overflushed, keep part of last page + const size_t overflushed = cl->_flushed - cl->_requested; + free_page(to->last()->split(overflushed)); + cl->_flushed -= overflushed; + } +} + +class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure { +public: + ZPageCacheFlushForAllocationClosure(size_t requested) : + ZPageCacheFlushClosure(requested) {} + + virtual bool do_page(const ZPage* page) { + if (_flushed < _requested) { + // Flush page + _flushed += page->size(); + return true; + } + + // Don't flush page + return false; + } +}; + +void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) { + EventZPageCacheFlush event; + + // Flush + ZPageCacheFlushForAllocationClosure cl(requested); + flush(&cl, to); + + // Send event + event.commit(requested, true /* for_allocation */); +} + +class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure { +private: + const uint64_t _now; + const uint64_t _delay; + uint64_t* _timeout; + +public: + ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay, uint64_t* timeout) : + ZPageCacheFlushClosure(requested), + _now(os::elapsedTime()), + _delay(delay), + _timeout(timeout) {} + + virtual bool do_page(const ZPage* page) { + const uint64_t expires = page->last_used() + _delay; + const uint64_t timeout = expires - MIN2(expires, _now); + + if (_flushed < _requested && timeout == 0) { + // Flush page + _flushed += page->size(); + return true; + } + + // Record shortest non-expired timeout + *_timeout = MIN2(*_timeout, timeout); + + // Don't flush page + return false; + } +}; + +size_t ZPageCache::flush_for_uncommit(size_t requested, uint64_t delay, uint64_t* timeout, ZList<ZPage>* to) { + if (requested == 0) { + // Nothing to flush + return 0; + } + + EventZPageCacheFlush event; + + // Flush + ZPageCacheFlushForUncommitClosure cl(requested, delay, timeout); + flush(&cl, to); + + // Send event + event.commit(requested, false /* for_allocation */); + + return cl._flushed; } void ZPageCache::pages_do(ZPageClosure* cl) const { --- old/src/hotspot/share/gc/z/zPageCache.hpp 2020-05-18 23:08:34.985840755 +0200 +++ new/src/hotspot/share/gc/z/zPageCache.hpp 2020-05-18 23:08:34.727832385 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,22 +27,11 @@ #include "gc/z/zList.hpp" #include "gc/z/zPage.hpp" #include "gc/z/zValue.hpp" -#include "memory/allocation.hpp" -class ZPageCacheFlushClosure : public StackObj { -protected: - const size_t _requested; - size_t _flushed; - -public: - ZPageCacheFlushClosure(size_t requested); - size_t overflushed() const; - virtual bool do_page(const ZPage* page) = 0; -}; +class ZPageCacheFlushClosure; class ZPageCache { private: - size_t _available; ZPerNUMA<ZList<ZPage> > _small; ZList<ZPage> _medium; ZList<ZPage> _large; @@ -55,21 +44,19 @@ ZPage* alloc_oversized_large_page(size_t size); ZPage* alloc_oversized_page(size_t size); - void free_page_inner(ZPage* page); - bool flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to); void flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to); void flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to); + void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to); public: ZPageCache(); - size_t available() const; - ZPage* alloc_page(uint8_t type, size_t size); void free_page(ZPage* page); - void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to); + void flush_for_allocation(size_t requested, ZList<ZPage>* to); + size_t flush_for_uncommit(size_t requested, uint64_t delay, uint64_t* timeout, ZList<ZPage>* to); void pages_do(ZPageClosure* cl) const; }; --- old/src/hotspot/share/gc/z/zPhysicalMemory.cpp 2020-05-18 23:08:35.390853893 +0200 +++ new/src/hotspot/share/gc/z/zPhysicalMemory.cpp 2020-05-18 23:08:35.132845523 +0200 @@ -27,92 +27,216 @@ #include "gc/z/zLargePages.inline.hpp" #include "gc/z/zNUMA.inline.hpp" #include "gc/z/zPhysicalMemory.inline.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" #include "runtime/init.hpp" #include "runtime/os.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" ZPhysicalMemory::ZPhysicalMemory() : + _nsegments_max(0), _nsegments(0), _segments(NULL) {} ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) : + _nsegments_max(0), _nsegments(0), _segments(NULL) { add_segment(segment); } ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) : + _nsegments_max(0), _nsegments(0), _segments(NULL) { - - // Copy segments - for (size_t i = 0; i < pmem.nsegments(); i++) { - add_segment(pmem.segment(i)); - } + add_segments(pmem); } const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) { // Free segments delete [] _segments; _segments = NULL; + _nsegments_max = 0; _nsegments = 0; // Copy segments - for (size_t i = 0; i < pmem.nsegments(); i++) { - add_segment(pmem.segment(i)); - } + add_segments(pmem); return *this; } ZPhysicalMemory::~ZPhysicalMemory() { delete [] _segments; - _segments = NULL; - _nsegments = 0; } size_t ZPhysicalMemory::size() const { size_t size = 0; - for (size_t i = 0; i < _nsegments; i++) { + for (uint32_t i = 0; i < _nsegments; i++) { size += _segments[i].size(); } return size; } +void ZPhysicalMemory::insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed) { + assert(index <= _nsegments, "Invalid index"); + + ZPhysicalMemorySegment* const from_segments = _segments; + + if (_nsegments + 1 > _nsegments_max) { + // Resize array + _nsegments_max = round_up_power_of_2(_nsegments_max +
1); + _segments = new ZPhysicalMemorySegment[_nsegments_max]; + + // Copy segments before index + for (uint32_t i = 0; i < index; i++) { + _segments[i] = from_segments[i]; + } + } + + // Copy/Move segments after index + for (uint32_t i = _nsegments; i > index; i--) { + _segments[i] = from_segments[i - 1]; + } + + // Insert new segment + _segments[index] = ZPhysicalMemorySegment(start, size, committed); + _nsegments++; + + // Delete old array + if (from_segments != _segments) { + delete [] from_segments; + } +} + +void ZPhysicalMemory::replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed) { + assert(index < _nsegments, "Invalid index"); + _segments[index] = ZPhysicalMemorySegment(start, size, committed); +} + +void ZPhysicalMemory::remove_segment(uint32_t index) { + assert(index < _nsegments, "Invalid index"); + + // Move segments after index + for (uint32_t i = index + 1; i < _nsegments; i++) { + _segments[i - 1] = _segments[i]; + } + + _nsegments--; +} + +void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) { + for (uint32_t i = 0; i < pmem.nsegments(); i++) { + add_segment(pmem.segment(i)); + } +} + +void ZPhysicalMemory::transfer_segments(ZPhysicalMemory& pmem) { + add_segments(pmem); + pmem = ZPhysicalMemory(); +} + +static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) { + return before.end() == after.start() && before.is_committed() == after.is_committed(); +} + void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) { - // Try merge with last segment - if (_nsegments > 0) { - ZPhysicalMemorySegment& last = _segments[_nsegments - 1]; - assert(last.end() <= segment.start(), "Segments added out of order"); - if (last.end() == segment.start()) { - last = ZPhysicalMemorySegment(last.start(), last.size() + segment.size()); + // Insert segments in address order, merge segments when possible + for (uint32_t i = _nsegments; i > 0; i--) { + const uint32_t current = i - 1; + + if (_segments[current].end() <= segment.start()) { + if (is_mergable(_segments[current], segment)) { + if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) { + // Merge with end of current segment and start of next segment + const size_t start = _segments[current].start(); + const size_t size = _segments[current].size() + segment.size() + _segments[current + 1].size(); + replace_segment(current, start, size, segment.is_committed()); + remove_segment(current + 1); + return; + } + + // Merge with end of current segment + const size_t start = _segments[current].start(); + const size_t size = _segments[current].size() + segment.size(); + replace_segment(current, start, size, segment.is_committed()); + return; + } else if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) { + // Merge with start of next segment + const size_t start = segment.start(); + const size_t size = segment.size() + _segments[current + 1].size(); + replace_segment(current + 1, start, size, segment.is_committed()); + return; + } + + // Insert after current segment + insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed()); return; } } - // Resize array - ZPhysicalMemorySegment* const old_segments = _segments; - _segments = new ZPhysicalMemorySegment[_nsegments + 1]; - for (size_t i = 0; i < _nsegments; i++) { - _segments[i] = old_segments[i]; + if (_nsegments > 0 && is_mergable(segment, _segments[0])) { + // Merge with start of first segment + const size_t start = segment.start(); +
const size_t size = segment.size() + _segments[0].size(); + replace_segment(0, start, size, segment.is_committed()); + return; } - delete [] old_segments; - // Add new segment - _segments[_nsegments] = segment; - _nsegments++; + // Insert before first segment + insert_segment(0, segment.start(), segment.size(), segment.is_committed()); +} + +bool ZPhysicalMemory::commit_segment(uint32_t index, size_t size) { + assert(index < _nsegments, "Invalid index"); + assert(size <= _segments[index].size(), "Invalid size"); + assert(!_segments[index].is_committed(), "Invalid state"); + + if (size == _segments[index].size()) { + // Completely committed + _segments[index].set_committed(true); + return true; + } + + if (size > 0) { + // Partially committed, split segment + insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, false /* committed */); + replace_segment(index, _segments[index].start(), size, true /* committed */); + } + + return false; +} + +bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) { + assert(index < _nsegments, "Invalid index"); + assert(size <= _segments[index].size(), "Invalid size"); + assert(_segments[index].is_committed(), "Invalid state"); + + if (size == _segments[index].size()) { + // Completely uncommitted + _segments[index].set_committed(false); + return true; + } + + if (size > 0) { + // Partially uncommitted, split segment + insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, true /* committed */); + replace_segment(index, _segments[index].start(), size, false /* committed */); + } + + return false; } ZPhysicalMemory ZPhysicalMemory::split(size_t size) { ZPhysicalMemory pmem; - size_t nsegments = 0; + uint32_t nsegments = 0; - for (size_t i = 0; i < _nsegments; i++) { + for (uint32_t i = 0; i < _nsegments; i++) { const ZPhysicalMemorySegment& segment = _segments[i]; if (pmem.size() < size) { if (pmem.size() + segment.size() <= size) { @@ -121,8 +245,8 @@ } else { // Split segment const size_t split_size = size - pmem.size(); - pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size)); - _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size); + pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed())); + _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed()); } } else { // Keep segment @@ -135,25 +259,68 @@ return pmem; } +ZPhysicalMemory ZPhysicalMemory::split_committed() { + ZPhysicalMemory pmem; + uint32_t nsegments = 0; + + for (uint32_t i = 0; i < _nsegments; i++) { + const ZPhysicalMemorySegment& segment = _segments[i]; + if (segment.is_committed()) { + // Transfer segment + pmem.add_segment(segment); + } else { + // Keep segment + _segments[nsegments++] = segment; + } + } + + _nsegments = nsegments; + + return pmem; +} + ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) : _backing(max_capacity) { - // Register everything as uncommitted - _uncommitted.free(0, max_capacity); + // Make the whole range free + _manager.free(0, max_capacity); } bool ZPhysicalMemoryManager::is_initialized() const { return _backing.is_initialized(); } -void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const { - _backing.warn_commit_limits(max); +void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const { + _backing.warn_commit_limits(max_capacity); } -bool 
ZPhysicalMemoryManager::supports_uncommit() { +bool ZPhysicalMemoryManager::should_enable_uncommit(size_t min_capacity, size_t max_capacity) { assert(!is_init_completed(), "Invalid state"); - // Test if uncommit is supported by uncommitting and then re-committing a granule - return commit(uncommit(ZGranuleSize)) == ZGranuleSize; + // If uncommit is not explicitly disabled, max capacity is greater than + // min capacity, and uncommit is supported by the platform, then uncommit + // will be enabled. + if (!ZUncommit) { + log_info(gc, init)("Uncommit: Disabled"); + return false; + } + + if (max_capacity == min_capacity) { + log_info(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)"); + return false; + } + + // Test if uncommit is supported by the operating system by committing + // and then uncommitting a granule. + ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */)); + if (!commit(pmem) || !uncommit(pmem)) { + log_info(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)"); + return false; + } + + log_info(gc, init)("Uncommit: Enabled"); + log_info(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay); + + return true; } void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const { @@ -172,86 +339,67 @@ } -size_t ZPhysicalMemoryManager::commit(size_t size) { - size_t committed = 0; +void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) { + assert(is_aligned(size, ZGranuleSize), "Invalid size"); - // Fill holes in the backing memory - while (committed < size) { + // Allocate segments + while (size > 0) { size_t allocated = 0; - const size_t remaining = size - committed; - const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated); - if (start == UINTPTR_MAX) { - // No holes to commit - break; - } - - // Try commit hole - const size_t filled = _backing.commit(start, allocated); - if (filled > 0) { - // Successful or partialy successful - _committed.free(start, filled); - committed += filled; - } - if (filled < allocated) { - // Failed or partialy failed - _uncommitted.free(start + filled, allocated - filled); - return committed; - } + const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated); + assert(start != UINTPTR_MAX, "Allocation should never fail"); + pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */)); + size -= allocated; } - - return committed; } -size_t ZPhysicalMemoryManager::uncommit(size_t size) { - size_t uncommitted = 0; - - // Punch holes in backing memory - while (uncommitted < size) { - size_t allocated = 0; - const size_t remaining = size - uncommitted; - const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated); - assert(start != UINTPTR_MAX, "Allocation should never fail"); - - // Try punch hole - const size_t punched = _backing.uncommit(start, allocated); - if (punched > 0) { - // Successful or partialy successful - _uncommitted.free(start, punched); - uncommitted += punched; - } - if (punched < allocated) { - // Failed or partialy failed - _committed.free(start + punched, allocated - punched); - return uncommitted; - } +void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) { + // Free segments + for (uint32_t i = 0; i < pmem.nsegments(); i++) { + const ZPhysicalMemorySegment& segment = pmem.segment(i); + _manager.free(segment.start(), segment.size()); } - - return uncommitted; } -ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t
size) { - assert(is_aligned(size, ZGranuleSize), "Invalid size"); - - ZPhysicalMemory pmem; +bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) { + // Commit segments + for (uint32_t i = 0; i < pmem.nsegments(); i++) { + const ZPhysicalMemorySegment& segment = pmem.segment(i); + if (segment.is_committed()) { + // Segment already committed + continue; + } - // Allocate segments - for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) { - const uintptr_t start = _committed.alloc_from_front(ZGranuleSize); - assert(start != UINTPTR_MAX, "Allocation should never fail"); - pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize)); + // Commit segment + const size_t committed = _backing.commit(segment.start(), segment.size()); + if (!pmem.commit_segment(i, committed)) { + // Failed or partially failed + return false; + } } - return pmem; + // Success + return true; } -void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) { - const size_t nsegments = pmem.nsegments(); - - // Free segments - for (size_t i = 0; i < nsegments; i++) { +bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) { + // Uncommit segments + for (uint32_t i = 0; i < pmem.nsegments(); i++) { const ZPhysicalMemorySegment& segment = pmem.segment(i); - _committed.free(segment.start(), segment.size()); + if (!segment.is_committed()) { + // Segment already uncommitted + continue; + } + + // Uncommit segment + const size_t uncommitted = _backing.uncommit(segment.start(), segment.size()); + if (!pmem.uncommit_segment(i, uncommitted)) { + // Failed or partially failed + return false; + } } + + // Success + return true; } void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const { @@ -259,14 +407,21 @@ os::pretouch_memory((void*)addr, (void*)(addr + size), page_size); } -void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const { - const size_t nsegments = pmem.nsegments(); +bool ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const { size_t size = 0; // Map segments - for (size_t i = 0; i < nsegments; i++) { + for (uint32_t i = 0; i < pmem.nsegments(); i++) { const ZPhysicalMemorySegment& segment = pmem.segment(i); - _backing.map(addr + size, segment.size(), segment.start()); + if (!_backing.map(addr + size, segment.size(), segment.start())) { + // Failed to map segment + if (size > 0) { + // Unmap successfully mapped segments + _backing.unmap(addr, size); + } + + return false; + } size += segment.size(); } @@ -277,6 +432,9 @@ // fault time.
os::numa_make_global((char*)addr, size); } + + // Success + return true; } void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const { @@ -295,18 +453,32 @@ } } -void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const { +bool ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const { if (ZVerifyViews) { // Map good view - map_view(pmem, ZAddress::good(offset)); + if (!map_view(pmem, ZAddress::good(offset))) { + fatal("Failed to map memory"); + } } else { // Map all views - map_view(pmem, ZAddress::marked0(offset)); - map_view(pmem, ZAddress::marked1(offset)); - map_view(pmem, ZAddress::remapped(offset)); + if (!map_view(pmem, ZAddress::marked0(offset))) { + return false; + } + if (!map_view(pmem, ZAddress::marked1(offset))) { + unmap_view(pmem, ZAddress::marked0(offset)); + return false; + } + if (!map_view(pmem, ZAddress::remapped(offset))) { + unmap_view(pmem, ZAddress::marked1(offset)); + unmap_view(pmem, ZAddress::marked0(offset)); + return false; + } } nmt_commit(pmem, offset); + + // Success + return true; } void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const { @@ -326,7 +498,9 @@ void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const { // Map good view assert(ZVerifyViews, "Should be enabled"); - map_view(pmem, ZAddress::good(offset)); + if (!map_view(pmem, ZAddress::good(offset))) { + fatal("Failed to map memory"); + } } void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const { --- old/src/hotspot/share/gc/z/zPhysicalMemory.hpp 2020-05-18 23:08:35.819867809 +0200 +++ new/src/hotspot/share/gc/z/zPhysicalMemory.hpp 2020-05-18 23:08:35.560859407 +0200 @@ -32,21 +32,30 @@ private: uintptr_t _start; uintptr_t _end; + bool _committed; public: ZPhysicalMemorySegment(); - ZPhysicalMemorySegment(uintptr_t start, size_t size); + ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed); uintptr_t start() const; uintptr_t end() const; size_t size() const; + + bool is_committed() const; + void set_committed(bool committed); }; class ZPhysicalMemory { private: - size_t _nsegments; + uint32_t _nsegments_max; + uint32_t _nsegments; ZPhysicalMemorySegment* _segments; + void insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed); + void replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed); + void remove_segment(uint32_t index); + public: ZPhysicalMemory(); ZPhysicalMemory(const ZPhysicalMemorySegment& segment); @@ -57,24 +66,30 @@ bool is_null() const; size_t size() const; - size_t nsegments() const; - const ZPhysicalMemorySegment& segment(size_t index) const; + uint32_t nsegments() const; + const ZPhysicalMemorySegment& segment(uint32_t index) const; + + void add_segments(const ZPhysicalMemory& pmem); + void transfer_segments(ZPhysicalMemory& pmem); + void add_segment(const ZPhysicalMemorySegment& segment); + bool commit_segment(uint32_t index, size_t size); + bool uncommit_segment(uint32_t index, size_t size); ZPhysicalMemory split(size_t size); + ZPhysicalMemory split_committed(); }; class ZPhysicalMemoryManager { private: ZPhysicalMemoryBacking _backing; - ZMemoryManager _committed; - ZMemoryManager _uncommitted; + ZMemoryManager _manager; void nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const; void nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const; void pretouch_view(uintptr_t addr, size_t size) const; - void 
map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const; + bool map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const; void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const; public: @@ -82,18 +97,18 @@ bool is_initialized() const; - void warn_commit_limits(size_t max) const; - bool supports_uncommit(); - - size_t commit(size_t size); - size_t uncommit(size_t size); + void warn_commit_limits(size_t max_capacity) const; + bool should_enable_uncommit(size_t min_capacity, size_t max_capacity); - ZPhysicalMemory alloc(size_t size); + void alloc(ZPhysicalMemory& pmem, size_t size); void free(const ZPhysicalMemory& pmem); + bool commit(ZPhysicalMemory& pmem); + bool uncommit(ZPhysicalMemory& pmem); + void pretouch(uintptr_t offset, size_t size) const; - void map(const ZPhysicalMemory& pmem, uintptr_t offset) const; + bool map(const ZPhysicalMemory& pmem, uintptr_t offset) const; void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const; void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const; --- old/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp 2020-05-18 23:08:36.235881304 +0200 +++ new/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp 2020-05-18 23:08:35.974872837 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,11 +29,13 @@ inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() : _start(UINTPTR_MAX), - _end(UINTPTR_MAX) {} + _end(UINTPTR_MAX), + _committed(false) {} -inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size) : +inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(uintptr_t start, size_t size, bool committed) : _start(start), - _end(start + size) {} + _end(start + size), + _committed(committed) {} inline uintptr_t ZPhysicalMemorySegment::start() const { return _start; @@ -47,15 +49,23 @@ return _end - _start; } +inline bool ZPhysicalMemorySegment::is_committed() const { + return _committed; +} + +inline void ZPhysicalMemorySegment::set_committed(bool committed) { + _committed = committed; +} + inline bool ZPhysicalMemory::is_null() const { return _nsegments == 0; } -inline size_t ZPhysicalMemory::nsegments() const { +inline uint32_t ZPhysicalMemory::nsegments() const { return _nsegments; } -inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(size_t index) const { +inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(uint32_t index) const { assert(index < _nsegments, "Invalid segment index"); return _segments[index]; } --- old/src/hotspot/share/gc/z/zUncommitter.cpp 2020-05-18 23:08:36.650894766 +0200 +++ new/src/hotspot/share/gc/z/zUncommitter.cpp 2020-05-18 23:08:36.391886365 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -56,9 +56,7 @@ void ZUncommitter::run_service() { for (;;) { // Try uncommit unused memory - const uint64_t timeout = ZHeap::heap()->uncommit(ZUncommitDelay); - - log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout); + const uint64_t timeout = ZHeap::heap()->uncommit(); // Idle until next attempt if (!idle(timeout)) { @@ -68,6 +66,10 @@ } void ZUncommitter::stop_service() { + // Cancel any ongoing uncommit + ZHeap::heap()->uncommit_cancel(); + + // Signal thread to stop MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag); _stop = true; ml.notify(); --- old/src/hotspot/share/gc/z/zVirtualMemory.cpp 2020-05-18 23:08:37.063908164 +0200 +++ new/src/hotspot/share/gc/z/zVirtualMemory.cpp 2020-05-18 23:08:36.805899794 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,14 +151,14 @@ return _initialized; } -ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool alloc_from_front) { +ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) { uintptr_t start; - if (alloc_from_front || size <= ZPageSizeSmall) { - // Small page + // Small pages are allocated at low addresses, while medium/large pages + // are allocated at high addresses (unless forced to be at a low address). + if (force_low_address || size <= ZPageSizeSmall) { start = _manager.alloc_from_front(size); } else { - // Medium/Large page start = _manager.alloc_from_back(size); } --- old/src/hotspot/share/gc/z/zVirtualMemory.hpp 2020-05-18 23:08:37.546923832 +0200 +++ new/src/hotspot/share/gc/z/zVirtualMemory.hpp 2020-05-18 23:08:37.215913095 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ bool is_initialized() const; - ZVirtualMemory alloc(size_t size, bool alloc_from_front = false); + ZVirtualMemory alloc(size_t size, bool force_low_address); void free(const ZVirtualMemory& vmem); }; --- old/src/hotspot/share/jfr/metadata/metadata.xml 2020-05-18 23:08:37.963937359 +0200 +++ new/src/hotspot/share/jfr/metadata/metadata.xml 2020-05-18 23:08:37.702928893 +0200 @@ -1001,11 +1001,11 @@ + + - - @@ -1046,8 +1046,6 @@ - - --- old/test/hotspot/gtest/gc/z/test_zForwarding.cpp 2020-05-18 23:08:38.515955266 +0200 +++ new/test/hotspot/gtest/gc/z/test_zForwarding.cpp 2020-05-18 23:08:38.184944528 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -140,7 +140,7 @@ static void test(void (*function)(ZForwarding*), uint32_t size) { // Create page const ZVirtualMemory vmem(0, ZPageSizeSmall); - const ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZPageSizeSmall)); + const ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZPageSizeSmall, true)); ZPage page(ZPageTypeSmall, vmem, pmem); page.reset(); --- old/test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp 2020-05-18 23:08:39.028971907 +0200 +++ new/test/hotspot/gtest/gc/z/test_zPhysicalMemory.cpp 2020-05-18 23:08:38.693961040 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,8 +26,8 @@ #include "unittest.hpp" TEST(ZPhysicalMemoryTest, copy) { - const ZPhysicalMemorySegment seg0(0, 100); - const ZPhysicalMemorySegment seg1(200, 100); + const ZPhysicalMemorySegment seg0(0, 100, true); + const ZPhysicalMemorySegment seg1(200, 100, true); ZPhysicalMemory pmem0; pmem0.add_segment(seg0); @@ -52,13 +52,13 @@ } TEST(ZPhysicalMemoryTest, segments) { - const ZPhysicalMemorySegment seg0(0, 1); - const ZPhysicalMemorySegment seg1(1, 1); - const ZPhysicalMemorySegment seg2(2, 1); - const ZPhysicalMemorySegment seg3(3, 1); - const ZPhysicalMemorySegment seg4(4, 1); - const ZPhysicalMemorySegment seg5(5, 1); - const ZPhysicalMemorySegment seg6(6, 1); + const ZPhysicalMemorySegment seg0(0, 1, true); + const ZPhysicalMemorySegment seg1(1, 1, true); + const ZPhysicalMemorySegment seg2(2, 1, true); + const ZPhysicalMemorySegment seg3(3, 1, true); + const ZPhysicalMemorySegment seg4(4, 1, true); + const ZPhysicalMemorySegment seg5(5, 1, true); + const ZPhysicalMemorySegment seg6(6, 1, true); ZPhysicalMemory pmem0; EXPECT_EQ(pmem0.nsegments(), 0u); @@ -116,9 +116,9 @@ TEST(ZPhysicalMemoryTest, split) { ZPhysicalMemory pmem; - pmem.add_segment(ZPhysicalMemorySegment(0, 10)); - pmem.add_segment(ZPhysicalMemorySegment(10, 10)); - pmem.add_segment(ZPhysicalMemorySegment(30, 10)); + pmem.add_segment(ZPhysicalMemorySegment(0, 10, true)); + pmem.add_segment(ZPhysicalMemorySegment(10, 10, true)); + pmem.add_segment(ZPhysicalMemorySegment(30, 10, true)); EXPECT_EQ(pmem.nsegments(), 2u); EXPECT_EQ(pmem.size(), 30u); @@ -140,3 +140,63 @@ EXPECT_EQ(pmem.nsegments(), 0u); EXPECT_EQ(pmem.size(), 0u); } + +TEST(ZPhysicalMemoryTest, split_committed) { + ZPhysicalMemory pmem0; + pmem0.add_segment(ZPhysicalMemorySegment(0, 10, true)); + pmem0.add_segment(ZPhysicalMemorySegment(10, 10, false)); + pmem0.add_segment(ZPhysicalMemorySegment(20, 10, true)); + pmem0.add_segment(ZPhysicalMemorySegment(30, 10, false)); + EXPECT_EQ(pmem0.nsegments(), 4u); + EXPECT_EQ(pmem0.size(), 40u); + + ZPhysicalMemory pmem1 = pmem0.split_committed(); + EXPECT_EQ(pmem0.nsegments(), 2u); + EXPECT_EQ(pmem0.size(), 20u); + EXPECT_EQ(pmem1.nsegments(), 2u); + EXPECT_EQ(pmem1.size(), 20u); +} + +TEST(ZPhysicalMemoryTest, transfer0) { + ZPhysicalMemory pmem0; + ZPhysicalMemory pmem1; + + pmem0.add_segment(ZPhysicalMemorySegment(10, 10, true)); + pmem0.add_segment(ZPhysicalMemorySegment(30, 10, true)); + pmem0.add_segment(ZPhysicalMemorySegment(50, 10, true)); + EXPECT_EQ(pmem0.nsegments(), 3u); + EXPECT_EQ(pmem0.size(), 30u); + + pmem1.add_segment(ZPhysicalMemorySegment(20, 10, true)); + 
pmem1.add_segment(ZPhysicalMemorySegment(40, 10, true)); + pmem1.add_segment(ZPhysicalMemorySegment(60, 10, true)); + EXPECT_EQ(pmem1.nsegments(), 3u); + EXPECT_EQ(pmem1.size(), 30u); + + pmem0.transfer_segments(pmem1); + EXPECT_EQ(pmem0.nsegments(), 1u); + EXPECT_EQ(pmem0.size(), 60u); + EXPECT_TRUE(pmem1.is_null()); +} + +TEST(ZPhysicalMemoryTest, transfer1) { + ZPhysicalMemory pmem0; + ZPhysicalMemory pmem1; + + pmem0.add_segment(ZPhysicalMemorySegment(10, 10, true)); + pmem0.add_segment(ZPhysicalMemorySegment(30, 10, true)); + pmem0.add_segment(ZPhysicalMemorySegment(50, 10, true)); + EXPECT_EQ(pmem0.nsegments(), 3u); + EXPECT_EQ(pmem0.size(), 30u); + + pmem1.add_segment(ZPhysicalMemorySegment(20, 10, false)); + pmem1.add_segment(ZPhysicalMemorySegment(40, 10, false)); + pmem1.add_segment(ZPhysicalMemorySegment(60, 10, false)); + EXPECT_EQ(pmem1.nsegments(), 3u); + EXPECT_EQ(pmem1.size(), 30u); + + pmem0.transfer_segments(pmem1); + EXPECT_EQ(pmem0.nsegments(), 6u); + EXPECT_EQ(pmem0.size(), 60u); + EXPECT_TRUE(pmem1.is_null()); +} --- old/test/hotspot/jtreg/gc/z/TestUncommit.java 2020-05-18 23:08:39.519987835 +0200 +++ new/test/hotspot/jtreg/gc/z/TestUncommit.java 2020-05-18 23:08:39.195977325 +0200 @@ -25,18 +25,13 @@ /* * @test TestUncommit - * @requires vm.gc.Z & !vm.graal.enabled & vm.compMode != "Xcomp" + * @requires vm.gc.Z & !vm.graal.enabled * @summary Test ZGC uncommit unused memory * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit true 2 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms512M -Xmx512M -XX:ZUncommitDelay=10 gc.z.TestUncommit false 1 * @run main/othervm -XX:+UseZGC -Xlog:gc*,gc+stats=off -Xms128M -Xmx512M -XX:ZUncommitDelay=10 -XX:-ZUncommit gc.z.TestUncommit false 1 */ -/* - * This test is disabled when running with -Xcomp, since it seems to affect - * the timing of the test, causing memory to appear to be uncommitted too fast. - */ - import java.util.ArrayList; public class TestUncommit { --- old/src/hotspot/share/gc/z/zPageCache.inline.hpp 2020-05-18 23:08:39.935001297 +0200 +++ /dev/null 2020-05-08 10:28:18.126332474 +0200 @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_Z_ZPAGECACHE_INLINE_HPP -#define SHARE_GC_Z_ZPAGECACHE_INLINE_HPP - -#include "gc/z/zList.inline.hpp" -#include "gc/z/zPageCache.hpp" -#include "gc/z/zValue.inline.hpp" - -inline size_t ZPageCache::available() const { - return _available; -} - -#endif // SHARE_GC_Z_ZPAGECACHE_INLINE_HPP
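
A note on the new segment bookkeeping in zPhysicalMemory.cpp above: add_segment() only merges segments that are both adjacent and in the same committed state (see is_mergable()). A gtest-style sketch in the spirit of test_zPhysicalMemory.cpp illustrates this; it is an illustrative example only, assuming the test file's usual includes, and is not part of the change:

    #include "gc/z/zPhysicalMemory.inline.hpp"
    #include "unittest.hpp"

    TEST(ZPhysicalMemoryTest, merge_rules_sketch) {
      ZPhysicalMemory pmem;

      // Adjacent segments with the same committed state are merged into one segment
      pmem.add_segment(ZPhysicalMemorySegment(0, 10, true));
      pmem.add_segment(ZPhysicalMemorySegment(10, 10, true));
      EXPECT_EQ(pmem.nsegments(), 1u);
      EXPECT_EQ(pmem.size(), 20u);

      // An adjacent segment with a different committed state stays separate
      pmem.add_segment(ZPhysicalMemorySegment(20, 10, false));
      EXPECT_EQ(pmem.nsegments(), 2u);
      EXPECT_EQ(pmem.size(), 30u);
    }

This matches the transfer0/transfer1 tests above, where committed neighbours merge but committed and uncommitted neighbours are kept as distinct segments.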
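Similarly, commit_segment() and uncommit_segment() split a segment when the backing store only partially succeeds, which is what lets ZPhysicalMemoryManager::commit() and uncommit() stop at the first failure while keeping the segment list consistent. A further sketch, continuing the hypothetical test file above with assumed values:

    TEST(ZPhysicalMemoryTest, partial_commit_sketch) {
      ZPhysicalMemory pmem;
      pmem.add_segment(ZPhysicalMemorySegment(0, 10, false));

      // Pretend the backing memory only managed to commit the first 6 units:
      // the segment is split into a committed prefix and an uncommitted remainder,
      // and commit_segment() returns false to signal the partial failure.
      EXPECT_FALSE(pmem.commit_segment(0, 6));
      EXPECT_EQ(pmem.nsegments(), 2u);
      EXPECT_TRUE(pmem.segment(0).is_committed());
      EXPECT_EQ(pmem.segment(0).size(), 6u);
      EXPECT_FALSE(pmem.segment(1).is_committed());
      EXPECT_EQ(pmem.segment(1).size(), 4u);
    }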