--- /dev/null	2019-03-01 09:48:43.932982607 +0000
+++ new/src/hotspot/os_cpu/linux_aarch64/gc/z/zPhysicalMemoryBacking_linux_aarch64.cpp	2019-03-14 17:07:50.466857288 +0000
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zMemory.hpp"
+#include "gc/z/zNUMA.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_linux_aarch64.hpp"
+#include "logging/log.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+// Support for building on older Linux systems
+#ifndef MADV_HUGEPAGE
+#define MADV_HUGEPAGE 14
+#endif
+
+// Proc file entry for max map count
+#define ZFILENAME_PROC_MAX_MAP_COUNT "/proc/sys/vm/max_map_count"
+
+ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity) {
+  // Check and warn if max map count is too low
+  check_max_map_count(max_capacity);
+}
+
+void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity) const {
+  const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
+  FILE* const file = fopen(filename, "r");
+  if (file == NULL) {
+    // Failed to open file, skip check
+    log_debug(gc, init)("Failed to open %s", filename);
+    return;
+  }
+
+  size_t actual_max_map_count = 0;
+  const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
+  fclose(file);
+  if (result != 1) {
+    // Failed to read file, skip check
+    log_debug(gc, init)("Failed to read %s", filename);
+    return;
+  }
+
+  // The required max map count is impossible to calculate exactly since subsystems
+  // other than ZGC are also creating memory mappings, and we have no control over that.
+  // However, ZGC tends to create the most mappings and dominate the total count.
+  // In the worst case, we speculate that we need another 20% to allow for
+  // non-ZGC subsystems to map memory.
+  const size_t required_max_map_count = (max_capacity / ZGranuleSize) * 1.2;
+  if (actual_max_map_count < required_max_map_count) {
+    log_warning(gc, init)("***** WARNING! INCORRECT SYSTEM CONFIGURATION DETECTED! *****");
+    log_warning(gc, init)("The system limit on number of memory mappings per process might be too low for the given");
+    log_warning(gc, init)("max Java heap size (" SIZE_FORMAT "M). Please adjust %s to allow for at",
+                          max_capacity / M, filename);
+    log_warning(gc, init)("least " SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). Continuing execution with the current",
+                          required_max_map_count, actual_max_map_count);
+    log_warning(gc, init)("limit could lead to a fatal error, due to failure to map memory.");
+  }
+}
+
+size_t ZPhysicalMemoryBacking::try_expand(size_t old_capacity, size_t new_capacity) {
+  assert(old_capacity < new_capacity, "Invalid old/new capacity");
+  return new_capacity;
+}
+
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Invalid size");
+  return ZPhysicalMemory(size);
+}
+
+void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
+  assert(pmem.nsegments() == 1, "Invalid number of segments");
+}
+
+void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
+  if (err == ENOMEM) {
+    fatal("Failed to map memory. Please check the system limit on number of "
+          "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
+  } else {
+    fatal("Failed to map memory (%s)", err.to_string());
+  }
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // We only have one heap mapping, so just convert the offset to a heap address
+  return ZAddress::address(offset);
+}
+
+void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
+  assert(pmem.nsegments() == 1, "Invalid number of segments");
+
+  const uintptr_t addr = ZAddress::address(offset);
+  const size_t size = pmem.size();
+
+  int flags = MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE;
+  if (ZLargePages::is_explicit()) {
+    flags |= MAP_HUGETLB;
+  }
+
+  const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, flags, 0, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    map_failed(err);
+  }
+
+  // Advise on use of transparent huge pages before touching it
+  if (ZLargePages::is_transparent()) {
+    if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
+      ZErrno err;
+      log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
+    }
+  }
+
+  // NUMA interleave memory before touching it
+  ZNUMA::memory_interleave(addr, size);
+
+  if (AlwaysPreTouch) {
+    const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
+    os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
+  const size_t size = pmem.size();
+  const uintptr_t addr = ZAddress::address(offset);
+
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    map_failed(err);
+  }
+}
+
+void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
+  // Does nothing when using VA-masking
+}
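
For reviewers unfamiliar with the scheme that map()/unmap() rely on, here is a minimal standalone sketch (not part of the patch; all names and the 2M size are illustrative) of the anonymous-mmap technique: commit a fixed range with MAP_FIXED|MAP_ANONYMOUS read/write, then discard it by remapping the same range PROT_NONE with MAP_NORESERVE, which drops the pages while keeping the address range reserved.

    // Illustrative sketch only; compile on Linux with: g++ demo.cpp -o demo
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static const size_t kSize = 2 * 1024 * 1024; // one hypothetical 2M granule

    int main() {
      // Reserve an address range (stand-in for the reserved heap view).
      void* const addr = mmap(NULL, kSize, PROT_NONE,
                              MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
      if (addr == MAP_FAILED) {
        perror("reserve");
        return 1;
      }

      // "map": back the fixed range with readable/writable anonymous memory.
      if (mmap(addr, kSize, PROT_READ | PROT_WRITE,
               MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0) == MAP_FAILED) {
        perror("map");
        return 1;
      }
      memset(addr, 0xAB, kSize); // touch the committed memory

      // "unmap": remap the same range PROT_NONE/MAP_NORESERVE, discarding the
      // pages but keeping the address range reserved, as unmap() does above.
      if (mmap(addr, kSize, PROT_NONE,
               MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0) == MAP_FAILED) {
        perror("unmap");
        return 1;
      }

      munmap(addr, kSize);
      return 0;
    }

Each committed granule contributes a mapping toward vm.max_map_count, which is why check_max_map_count() warns when max_capacity / ZGranuleSize (plus the speculative 20% headroom) exceeds the current limit.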