--- old/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	2020-05-15 01:14:18.260897914 +0200
+++ new/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp	2020-05-15 01:14:18.001889500 +0200
@@ -6415,6 +6415,14 @@
       StubRoutines::_dtan = generate_libmTan();
     }
   }
+
+  // Safefetch stubs.
+  generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
+                     &StubRoutines::_safefetch32_fault_pc,
+                     &StubRoutines::_safefetch32_continuation_pc);
+  generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
+                     &StubRoutines::_safefetchN_fault_pc,
+                     &StubRoutines::_safefetchN_continuation_pc);
 }

 void generate_all() {
@@ -6534,14 +6542,6 @@
     StubRoutines::_base64_encodeBlock = generate_base64_encodeBlock();
   }

-  // Safefetch stubs.
-  generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
-                     &StubRoutines::_safefetch32_fault_pc,
-                     &StubRoutines::_safefetch32_continuation_pc);
-  generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
-                     &StubRoutines::_safefetchN_fault_pc,
-                     &StubRoutines::_safefetchN_continuation_pc);
-
   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
   if (bs_nm != NULL) {
     StubRoutines::x86::_method_entry_barrier = generate_method_entry_barrier();
--- old/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	2020-05-15 01:14:18.826916303 +0200
+++ new/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp	2020-05-15 01:14:18.569907953 +0200
@@ -33,6 +33,7 @@
 #include "logging/log.hpp"
 #include "runtime/init.hpp"
 #include "runtime/os.hpp"
+#include "runtime/stubRoutines.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/growableArray.hpp"
@@ -390,7 +391,7 @@
   return 0;
 }

-ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap(size_t offset, size_t length, bool touch) const {
+ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
   // On hugetlbfs, mapping a file segment will fail immediately, without
   // the need to touch the mapped pages first, if there aren't enough huge
   // pages available to back the mapping.
@@ -420,6 +421,50 @@
   return 0;
 }

+static bool is_mapping_backed_by_memory(void* addr, size_t length, size_t page_size) {
+  char* const start = (char*)addr;
+  char* const end = start + length;
+
+  // Touch pages to make sure the mapping is backed. If the mapping can't
+  // be backed we'll get a SIGBUS, which is why we're using SafeFetch32.
+  // On tmpfs, doing a fetch (as opposed to a store) is enough to cause
+  // the backing pages to be allocated.
+  for (char *p = start; p < end; p += page_size) {
+    if (SafeFetch32((int*)p, -1) == -1) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const {
+  // On tmpfs, we need to touch the mapped pages to figure out
+  // if there are enough pages available to back the mapping.
+  void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+  if (addr == MAP_FAILED) {
+    // Failed
+    return errno;
+  }
+
+  // Back the mapping with transparent huge pages
+  os::realign_memory((char*)addr, length, os::large_page_size());
+
+  // Check if the mapping can be backed by memory
+  const bool backed = is_mapping_backed_by_memory(addr, length, _block_size);
+
+  // Unmap again. From now on, if the mapping was backed, the pages that
+  // were mapped are allocated to this file. There's no risk in getting
+  // SIGBUS when touching them.
+  if (munmap(addr, length) == -1) {
+    // Failed
+    return errno;
+  }
+
+  // Success
+  return backed ? 0 : ENOMEM;
+}
+
 ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
   uint8_t data = 0;

@@ -438,7 +483,8 @@
 ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
   // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
   // since Linux 4.3. When fallocate(2) is not supported we emulate it using
-  // ftruncate/pwrite (for tmpfs) or ftruncate/mmap/munmap (for hugetlbfs).
+  // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
+  // (for tmpfs without transparent huge pages and other filesystem types).

   const size_t end = offset + length;
   if (end > _size) {
@@ -451,8 +497,12 @@
   }

   // Allocate backing memory
-  const ZErrno err = is_hugetlbfs() ? fallocate_compat_mmap(offset, length, false /* touch */)
-                                    : fallocate_compat_pwrite(offset, length);
+  const ZErrno err = ZLargePages::is_explicit() ?
+                     fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */) :
+                     (ZLargePages::is_transparent() ?
+                      fallocate_compat_mmap_tmpfs(offset, length) :
+                      fallocate_compat_pwrite(offset, length));
+
   if (err) {
     if (end > _size) {
       // Restore file size
@@ -495,7 +545,7 @@
   // Note that allocating huge pages this way will only reserve them, and not
   // associate them with segments of the file. We must guarantee that we at
   // some point touch these segments, otherwise we can not punch hole in them.
-  if (z_fallocate_supported && !is_hugetlbfs()) {
+  if (z_fallocate_supported && !ZLargePages::is_enabled()) {
     const ZErrno err = fallocate_fill_hole_syscall(offset, length);
     if (!err) {
       // Success
@@ -516,12 +566,12 @@
 }

 ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
-  if (is_hugetlbfs()) {
+  if (ZLargePages::is_explicit()) {
     // We can only punch hole in pages that have been touched. Non-touched
     // pages are only reserved, and not associated with any specific file
     // segment. We don't know which pages have been previously touched, so
     // we always touch them here to guarantee that we can punch hole.
-    const ZErrno err = fallocate_compat_mmap(offset, length, true /* touch */);
+    const ZErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */);
     if (err) {
       // Failed
       return err;
     }
@@ -582,7 +632,7 @@
 retry:
   const ZErrno err = fallocate(false /* punch_hole */, offset, length);
   if (err) {
-    if (err == ENOSPC && !is_init_completed() && is_hugetlbfs() && z_fallocate_hugetlbfs_attempts-- > 0) {
+    if (err == ENOSPC && !is_init_completed() && ZLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) {
       // If we fail to allocate during initialization, due to lack of space on
      // the hugetlbfs filesystem, then we wait and retry a few times before
       // giving up. Otherwise there is a risk that running JVMs back-to-back
--- old/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	2020-05-15 01:14:19.261930435 +0200
+++ new/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp	2020-05-15 01:14:19.005922118 +0200
@@ -47,7 +47,8 @@
   bool tmpfs_supports_transparent_huge_pages() const;

   ZErrno fallocate_compat_ftruncate(size_t size) const;
-  ZErrno fallocate_compat_mmap(size_t offset, size_t length, bool reserve_only) const;
+  ZErrno fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const;
+  ZErrno fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const;
   ZErrno fallocate_compat_pwrite(size_t offset, size_t length) const;
   ZErrno fallocate_fill_hole_compat(size_t offset, size_t length);
   ZErrno fallocate_fill_hole_syscall(size_t offset, size_t length);
--- old/src/hotspot/share/gc/z/zPhysicalMemory.cpp	2020-05-15 01:14:19.674943853 +0200
+++ new/src/hotspot/share/gc/z/zPhysicalMemory.cpp	2020-05-15 01:14:19.419935568 +0200
@@ -284,11 +284,6 @@
     // fault time.
     os::numa_make_global((char*)addr, size);
   }
-
-  // Setup transparent large pages
-  if (ZLargePages::is_transparent()) {
-    os::realign_memory((char*)addr, size, os::large_page_size());
-  }
 }

 void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
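
Note on the SafeFetch dependency (a reviewer sketch, not part of the patch): the new is_mapping_backed_by_memory() only works because SafeFetch32 turns the SIGBUS raised for a page that cannot be backed into the supplied error value instead of crashing the VM, which is apparently why the patch also moves the SafeFetch stub generation out of generate_all() into the earlier stub-generation phase, ahead of ZGC heap initialization. The standalone program below is a minimal analogue of that probe, assuming a single-threaded Linux/glibc process; it substitutes a sigsetjmp/siglongjmp SIGBUS handler for the SafeFetch32 stub, and the file name /tmp/zprobeXXXXXX is purely illustrative.

    // probe_backing.cpp -- standalone sketch, not HotSpot code.
    // Analogue of the patch's touch probe: read one word per page of a
    // shared file mapping and treat SIGBUS as "the kernel cannot back
    // the mapping". HotSpot does the fault-safe read via its SafeFetch32
    // stub; here a sigsetjmp/siglongjmp SIGBUS handler stands in for it,
    // which is only safe in a single-threaded demo like this one.
    #include <csetjmp>
    #include <csignal>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <sys/mman.h>
    #include <unistd.h>

    static sigjmp_buf probe_env;

    static void sigbus_handler(int) {
      siglongjmp(probe_env, 1);  // unwind out of the faulting load
    }

    // Returns true if every page in [addr, addr + length) could be read.
    // On tmpfs a read fault is enough to allocate the backing page.
    static bool is_mapping_backed_by_memory(char* addr, size_t length, size_t page_size) {
      struct sigaction sa, old_sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_handler = sigbus_handler;
      sigaction(SIGBUS, &sa, &old_sa);

      bool backed = true;
      if (sigsetjmp(probe_env, 1) == 0) {
        for (char* p = addr; p < addr + length; p += page_size) {
          volatile int v = *(const volatile int*)p;  // a fetch, not a store
          (void)v;
        }
      } else {
        backed = false;  // SIGBUS: no pages left to back this mapping
      }

      sigaction(SIGBUS, &old_sa, nullptr);  // restore the previous handler
      return backed;
    }

    int main() {
      char path[] = "/tmp/zprobeXXXXXX";  // hypothetical demo file; /tmp is often tmpfs
      int fd = mkstemp(path);
      if (fd < 0) { perror("mkstemp"); return 1; }
      unlink(path);  // keep the fd, drop the name

      const size_t page = (size_t)sysconf(_SC_PAGESIZE);
      const size_t length = 16 * page;
      if (ftruncate(fd, (off_t)length) != 0) { perror("ftruncate"); return 1; }

      void* addr = mmap(nullptr, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      if (addr == MAP_FAILED) { perror("mmap"); return 1; }

      const bool backed = is_mapping_backed_by_memory((char*)addr, length, page);
      printf("mapping %s backed by memory\n", backed ? "is" : "is NOT");

      munmap(addr, length);
      close(fd);
      return backed ? 0 : 1;
    }

On a tmpfs-backed /tmp the read fault alone allocates each page, matching the patch comment that a fetch, as opposed to a store, is enough to cause the backing pages to be allocated.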
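
For context on the compat paths (again a sketch, not part of the patch): where fallocate(2) is supported, filling and punching a hole are single syscalls, which is what fallocate_fill_hole_syscall() and fallocate_punch_hole() rely on; the mmap/pwrite paths above only emulate the fill on kernels or filesystems where the syscall fails. A minimal Linux/glibc illustration, with a hypothetical /tmp/zholeXXXXXX demo file:

    // punch_hole.cpp -- standalone sketch, not HotSpot code.
    // The two fallocate(2) operations the patch wraps or emulates:
    // mode 0 allocates backing blocks ("fill hole"), and
    // FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE releases them again
    // without changing the file size ("punch hole").
    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE  // for fallocate() and the FALLOC_FL_* flags
    #endif
    #include <fcntl.h>
    #include <cstdio>
    #include <cstdlib>
    #include <unistd.h>

    int main() {
      char path[] = "/tmp/zholeXXXXXX";  // hypothetical demo file
      int fd = mkstemp(path);
      if (fd < 0) { perror("mkstemp"); return 1; }
      unlink(path);

      const off_t length = 2 * 1024 * 1024;

      // Fill hole: allocate backing for [0, length). Fails (e.g. with
      // EOPNOTSUPP) on filesystems or kernels without fallocate support,
      // which is exactly the case the compat paths handle.
      if (fallocate(fd, 0, 0, length) != 0) { perror("fallocate(fill)"); return 1; }

      // Punch hole: release the backing again, keeping the file size.
      if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, length) != 0) {
        perror("fallocate(punch)");
        return 1;
      }

      puts("fill + punch succeeded");
      close(fd);
      return 0;
    }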