src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp

*** 31,40 ****
--- 31,41 ----
  #include "gc/z/zPhysicalMemoryBacking_linux.hpp"
  #include "gc/z/zSyscall_linux.hpp"
  #include "logging/log.hpp"
  #include "runtime/init.hpp"
  #include "runtime/os.hpp"
+ #include "runtime/stubRoutines.hpp"
  #include "utilities/align.hpp"
  #include "utilities/debug.hpp"
  #include "utilities/growableArray.hpp"
  
  #include <fcntl.h>
*** 388,398 ****
  
    // Success
    return 0;
  }
  
! ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap(size_t offset, size_t length, bool touch) const {
    // On hugetlbfs, mapping a file segment will fail immediately, without
    // the need to touch the mapped pages first, if there aren't enough huge
    // pages available to back the mapping.
    void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
    if (addr == MAP_FAILED) {
--- 389,399 ----
  
    // Success
    return 0;
  }
  
! ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(size_t offset, size_t length, bool touch) const {
    // On hugetlbfs, mapping a file segment will fail immediately, without
    // the need to touch the mapped pages first, if there aren't enough huge
    // pages available to back the mapping.
    void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
    if (addr == MAP_FAILED) {
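
Note on the hunk above: the probe works because hugetlbfs reserves huge pages at mmap() time, so a failing map reports pool exhaustion without any page being touched. A minimal standalone sketch of that behavior, assuming a file descriptor on a hugetlbfs mount (the helper name is hypothetical, not part of the patch):

  #include <sys/mman.h>
  #include <sys/types.h>
  #include <cerrno>
  #include <cstddef>

  // Probe whether the huge page pool can back [offset, offset + length)
  // of the given hugetlbfs file. Returns 0 on success, errno on failure.
  static int probe_hugetlbfs_segment(int fd, size_t offset, size_t length) {
    void* const addr = mmap(nullptr, length, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, (off_t)offset);
    if (addr == MAP_FAILED) {
      // On hugetlbfs this fails up front (typically with ENOMEM) when the
      // pool has too few free huge pages; no touching is required.
      return errno;
    }
    // The reservation now belongs to the file segment; unmap again.
    munmap(addr, length);
    return 0;
  }
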
*** 418,427 ****
--- 419,472 ----
  
    // Success
    return 0;
  }
  
+ static bool is_mapping_backed_by_memory(void* addr, size_t length, size_t page_size) {
+   char* const start = (char*)addr;
+   char* const end = start + length;
+ 
+   // Touch pages to make sure the mapping is backed. If the mapping can't
+   // be backed we'll get a SIGBUS, which is why we're using SafeFetch32.
+   // On tmpfs, doing a fetch (as opposed to a store) is enough to cause
+   // the backing pages to be allocated.
+   for (char *p = start; p < end; p += page_size) {
+     if (SafeFetch32((int*)p, -1) == -1) {
+       return false;
+     }
+   }
+ 
+   return true;
+ }
+ 
+ ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(size_t offset, size_t length) const {
+   // On tmpfs, we need to touch the mapped pages to figure out
+   // if there are enough pages available to back the mapping.
+   void* const addr = mmap(0, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, offset);
+   if (addr == MAP_FAILED) {
+     // Failed
+     return errno;
+   }
+ 
+   // Back the mapping with transparent huge pages
+   os::realign_memory((char*)addr, length, os::large_page_size());
+ 
+   // Check if the mapping can be backed by memory
+   const bool backed = is_mapping_backed_by_memory(addr, length, _block_size);
+ 
+   // Unmap again. From now on, if the mapping was backed, the pages that
+   // were mapped are allocated to this file. There's no risk in getting
+   // SIGBUS when touching them.
+   if (munmap(addr, length) == -1) {
+     // Failed
+     return errno;
+   }
+ 
+   // Success
+   return backed ? 0 : ENOMEM;
+ }
+ 
  ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(size_t offset, size_t length) const {
    uint8_t data = 0;
  
    // Allocate backing memory by writing to each block
    for (size_t pos = offset; pos < offset + length; pos += _block_size) {
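
The SafeFetch32 call introduced above is a HotSpot stub (hence the new stubRoutines.hpp include) that performs a load which returns the supplied default value (-1 here) instead of crashing if the access raises SIGBUS. Outside HotSpot the same probe can be approximated with a SIGBUS handler and sigsetjmp; a rough sketch under that assumption, with hypothetical helper names and none of HotSpot's safety guarantees:

  #include <csetjmp>
  #include <csignal>
  #include <cstddef>

  static sigjmp_buf probe_env;

  static void sigbus_handler(int) {
    siglongjmp(probe_env, 1);  // Unwind back into the probe loop
  }

  // Returns true if every page in [addr, addr + length) can be backed.
  // Simplified: installs a process-wide handler, not thread-safe.
  static bool probe_pages(char* addr, size_t length, size_t page_size) {
    struct sigaction sa = {}, old_sa;
    sa.sa_handler = sigbus_handler;
    sigaction(SIGBUS, &sa, &old_sa);

    bool backed = true;
    for (char* p = addr; p < addr + length; p += page_size) {
      if (sigsetjmp(probe_env, 1) == 0) {
        (void)*(volatile int*)p;  // A fetch suffices to allocate the page on tmpfs
      } else {
        backed = false;  // SIGBUS: the kernel could not back this page
        break;
      }
    }

    sigaction(SIGBUS, &old_sa, nullptr);
    return backed;
  }
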
*** 436,446 ****
  }
  
  ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
    // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
    // since Linux 4.3. When fallocate(2) is not supported we emulate it using
!   // ftruncate/pwrite (for tmpfs) or ftruncate/mmap/munmap (for hugetlbfs).
    const size_t end = offset + length;
    if (end > _size) {
      // Increase file size
      const ZErrno err = fallocate_compat_ftruncate(end);
--- 481,492 ----
  }
  
  ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(size_t offset, size_t length) {
    // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
    // since Linux 4.3. When fallocate(2) is not supported we emulate it using
!   // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
!   // (for tmpfs without transparent huge pages and other filesystem types).
    const size_t end = offset + length;
    if (end > _size) {
      // Increase file size
      const ZErrno err = fallocate_compat_ftruncate(end);
*** 449,460 ****
        return err;
      }
    }
  
    // Allocate backing memory
!   const ZErrno err = is_hugetlbfs() ? fallocate_compat_mmap(offset, length, false /* touch */)
!                                     : fallocate_compat_pwrite(offset, length);
    if (err) {
      if (end > _size) {
        // Restore file size
        fallocate_compat_ftruncate(_size);
      }
--- 495,510 ----
        return err;
      }
    }
  
    // Allocate backing memory
!   const ZErrno err = ZLargePages::is_explicit() ?
!                      fallocate_compat_mmap_hugetlbfs(offset, length, false /* touch */) :
!                      (ZLargePages::is_transparent() ?
!                       fallocate_compat_mmap_tmpfs(offset, length) :
!                       fallocate_compat_pwrite(offset, length));
! 
    if (err) {
      if (end > _size) {
        // Restore file size
        fallocate_compat_ftruncate(_size);
      }
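
For reference, the pwrite fallback selected by the last branch of the new conditional boils down to writing one byte into each block so the filesystem must allocate backing storage, surfacing errors such as ENOSPC eagerly. A standalone sketch of that idea (a hypothetical free function mirroring fallocate_compat_pwrite):

  #include <sys/types.h>
  #include <unistd.h>
  #include <cerrno>
  #include <cstddef>
  #include <cstdint>

  // Force the filesystem to allocate backing storage for each block by
  // writing a single zero byte into it. Returns 0 on success, else errno.
  static int preallocate_by_pwrite(int fd, size_t offset, size_t length, size_t block_size) {
    const uint8_t data = 0;
    for (size_t pos = offset; pos < offset + length; pos += block_size) {
      if (pwrite(fd, &data, sizeof(data), (off_t)pos) == -1) {
        return errno;  // e.g. ENOSPC when the filesystem is full
      }
    }
    return 0;
  }
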
*** 493,503 ****
  ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) {
    // Using compat mode is more efficient when allocating space on hugetlbfs.
    // Note that allocating huge pages this way will only reserve them, and not
    // associate them with segments of the file. We must guarantee that we at
    // some point touch these segments, otherwise we can not punch hole in them.
!   if (z_fallocate_supported && !is_hugetlbfs()) {
      const ZErrno err = fallocate_fill_hole_syscall(offset, length);
      if (!err) {
        // Success
        return 0;
      }
--- 543,553 ----
  ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(size_t offset, size_t length) {
    // Using compat mode is more efficient when allocating space on hugetlbfs.
    // Note that allocating huge pages this way will only reserve them, and not
    // associate them with segments of the file. We must guarantee that we at
    // some point touch these segments, otherwise we can not punch hole in them.
!   if (z_fallocate_supported && !ZLargePages::is_enabled()) {
      const ZErrno err = fallocate_fill_hole_syscall(offset, length);
      if (!err) {
        // Success
        return 0;
      }
*** 514,529 ****
  
    return fallocate_fill_hole_compat(offset, length);
  }
  
  ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
!   if (is_hugetlbfs()) {
      // We can only punch hole in pages that have been touched. Non-touched
      // pages are only reserved, and not associated with any specific file
      // segment. We don't know which pages have been previously touched, so
      // we always touch them here to guarantee that we can punch hole.
!     const ZErrno err = fallocate_compat_mmap(offset, length, true /* touch */);
      if (err) {
        // Failed
        return err;
      }
    }
--- 564,579 ----
  
    return fallocate_fill_hole_compat(offset, length);
  }
  
  ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(size_t offset, size_t length) {
!   if (ZLargePages::is_explicit()) {
      // We can only punch hole in pages that have been touched. Non-touched
      // pages are only reserved, and not associated with any specific file
      // segment. We don't know which pages have been previously touched, so
      // we always touch them here to guarantee that we can punch hole.
!     const ZErrno err = fallocate_compat_mmap_hugetlbfs(offset, length, true /* touch */);
      if (err) {
        // Failed
        return err;
      }
    }
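
For context on what the pre-touch above prepares for: once pages are associated with file segments, the actual hole punching is done with fallocate(2) and the FALLOC_FL_PUNCH_HOLE flag, roughly as in this standalone sketch (the flags are the real Linux constants; the wrapper is hypothetical):

  #define _GNU_SOURCE  // fallocate(2) is a Linux-specific extension
  #include <fcntl.h>
  #include <cerrno>
  #include <cstddef>

  // Deallocate the file blocks backing [offset, offset + length).
  // FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE.
  static int punch_hole(int fd, size_t offset, size_t length) {
    if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                  (off_t)offset, (off_t)length) == -1) {
      return errno;
    }
    return 0;
  }
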
*** 580,590 ****
                         offset / M, (offset + length) / M, length / M);
  
  retry:
    const ZErrno err = fallocate(false /* punch_hole */, offset, length);
    if (err) {
!     if (err == ENOSPC && !is_init_completed() && is_hugetlbfs() && z_fallocate_hugetlbfs_attempts-- > 0) {
        // If we fail to allocate during initialization, due to lack of space on
        // the hugetlbfs filesystem, then we wait and retry a few times before
        // giving up. Otherwise there is a risk that running JVMs back-to-back
        // will fail, since there is a delay between process termination and the
        // huge pages owned by that process being returned to the huge page pool
--- 630,640 ----
                         offset / M, (offset + length) / M, length / M);
  
  retry:
    const ZErrno err = fallocate(false /* punch_hole */, offset, length);
    if (err) {
!     if (err == ENOSPC && !is_init_completed() && ZLargePages::is_explicit() && z_fallocate_hugetlbfs_attempts-- > 0) {
        // If we fail to allocate during initialization, due to lack of space on
        // the hugetlbfs filesystem, then we wait and retry a few times before
        // giving up. Otherwise there is a risk that running JVMs back-to-back
        // will fail, since there is a delay between process termination and the
        // huge pages owned by that process being returned to the huge page pool
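
The guarded retry above amounts to a bounded wait-and-retry loop around the allocation. A simplified sketch of the same shape, where try_allocate is a hypothetical stand-in for the fallocate() call:

  #include <unistd.h>
  #include <cerrno>

  // Hypothetical stand-in for the real allocation attempt.
  extern int try_allocate();

  // Retry ENOSPC a bounded number of times, sleeping between attempts to
  // give huge pages of recently terminated JVMs time to return to the pool.
  static int allocate_with_retry(int attempts) {
    for (;;) {
      const int err = try_allocate();
      if (err != ENOSPC || attempts-- <= 0) {
        return err;  // Success, a non-retryable error, or attempts exhausted
      }
      sleep(1);
    }
  }
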