1 /*
   2  * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/z/zAddress.inline.hpp"
  26 #include "gc/z/zErrno.hpp"
  27 #include "gc/z/zLargePages.inline.hpp"
  28 #include "gc/z/zMemory.hpp"
  29 #include "gc/z/zNUMA.hpp"
  30 #include "gc/z/zPhysicalMemory.inline.hpp"
  31 #include "logging/log.hpp"
  32 #include "runtime/os.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/debug.hpp"
  35 #include "zBackingFile_linux_x86.hpp"
  36 #include "zPhysicalMemoryBacking_linux_x86.hpp"
  37 
  38 #include <stdio.h>
  39 #include <sys/mman.h>
  40 #include <sys/types.h>
  41 
  42 // Support for building on older Linux systems
  43 #ifndef MADV_HUGEPAGE
  44 #define MADV_HUGEPAGE                        14
  45 #endif
  46 
// Proc file entry for max map count
  48 #define ZFILENAME_PROC_MAX_MAP_COUNT         "/proc/sys/vm/max_map_count"
  49 
// Construct the physical memory backing for a heap of at most max_capacity
// bytes, managed in granules of granule_size bytes. The free-list manager and
// backing file are default-constructed; only a best-effort sanity check of
// the system's mapping limit is performed here.
ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity, size_t granule_size) :
    _manager(),
    _file(),
    _granule_size(granule_size) {

  // Check and warn if max map count seems too low
  check_max_map_count(max_capacity, granule_size);
}
  58 
  59 void ZPhysicalMemoryBacking::check_max_map_count(size_t max_capacity, size_t granule_size) const {
  60   const char* const filename = ZFILENAME_PROC_MAX_MAP_COUNT;
  61   FILE* const file = fopen(filename, "r");
  62   if (file == NULL) {
  63     // Failed to open file, skip check
  64     log_debug(gc)("Failed to open %s", filename);
  65     return;
  66   }
  67 
  68   size_t actual_max_map_count = 0;
  69   const int result = fscanf(file, SIZE_FORMAT, &actual_max_map_count);
  70   fclose(file);
  71   if (result != 1) {
  72     // Failed to read file, skip check
  73     log_debug(gc)("Failed to read %s", filename);
  74     return;
  75   }
  76 
  77   // The required max map count is impossible to calculate exactly since subsystems
  78   // other than ZGC are also creating memory mappings, and we have no control over that.
  79   // However, ZGC tends to create the most mappings and dominate the total count.
  80   // In the worst cases, ZGC will map each granule three times, i.e. once per heap view.
  81   // We speculate that we need another 20% to allow for non-ZGC subsystems to map memory.
  82   const size_t required_max_map_count = (max_capacity / granule_size) * 3 * 1.2;
  83   if (actual_max_map_count < required_max_map_count) {
  84     log_warning(gc)("The system limit on number of memory mappings "
  85                     "per process might be too low for the given");
  86     log_warning(gc)("Java heap size (" SIZE_FORMAT "M). Please "
  87                     "adjust %s to allow for at least", max_capacity / M, filename);
  88     log_warning(gc)(SIZE_FORMAT " mappings (current limit is " SIZE_FORMAT "). "
  89                     "Continuing execution with the current limit could",
  90                     required_max_map_count, actual_max_map_count);
  91     log_warning(gc)("lead to a fatal error down the line, due to failed "
  92                     "attempts to map memory.");
  93   }
  94 }
  95 
// Returns true once the backing file has been successfully set up.
bool ZPhysicalMemoryBacking::is_initialized() const {
  return _file.is_initialized();
}
  99 
 100 bool ZPhysicalMemoryBacking::expand(size_t from, size_t to) {
 101   const size_t size = to - from;
 102 
 103   // Expand
 104   if (!_file.expand(from, size)) {
 105     return false;
 106   }
 107 
 108   // Add expanded space to free list
 109   _manager.free(from, size);
 110 
 111   return true;
 112 }
 113 
 114 ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
 115   assert(is_aligned(size, _granule_size), "Invalid size");
 116 
 117   ZPhysicalMemory pmem;
 118 
 119   // Allocate segments
 120   for (size_t allocated = 0; allocated < size; allocated += _granule_size) {
 121     const uintptr_t start = _manager.alloc_from_front(_granule_size);
 122     assert(start != UINTPTR_MAX, "Allocation should never fail");
 123     pmem.add_segment(ZPhysicalMemorySegment(start, _granule_size));
 124   }
 125 
 126   return pmem;
 127 }
 128 
 129 void ZPhysicalMemoryBacking::free(ZPhysicalMemory pmem) {
 130   const size_t nsegments = pmem.nsegments();
 131 
 132   // Free segments
 133   for (size_t i = 0; i < nsegments; i++) {
 134     const ZPhysicalMemorySegment segment = pmem.segment(i);
 135     _manager.free(segment.start(), segment.size());
 136   }
 137 }
 138 
 139 void ZPhysicalMemoryBacking::map_failed(ZErrno err) const {
 140   if (err == ENOMEM) {
 141     fatal("Failed to map memory. Please check the system limit on number of "
 142           "memory mappings allowed per process (see %s)", ZFILENAME_PROC_MAX_MAP_COUNT);
 143   } else {
 144     fatal("Failed to map memory (%s)", err.to_string());
 145   }
 146 }
 147 
 148 void ZPhysicalMemoryBacking::advise_view(uintptr_t addr, size_t size) const {
 149   if (madvise((void*)addr, size, MADV_HUGEPAGE) == -1) {
 150     ZErrno err;
 151     log_error(gc)("Failed to advise use of transparent huge pages (%s)", err.to_string());
 152   }
 153 }
 154 
 155 void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
 156   const size_t page_size = ZLargePages::is_explicit() ? os::large_page_size() : os::vm_page_size();
 157   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 158 }
 159 
 160 void ZPhysicalMemoryBacking::map_view(ZPhysicalMemory pmem, uintptr_t addr, bool pretouch) const {
 161   const size_t nsegments = pmem.nsegments();
 162 
 163   // Map segments
 164   for (size_t i = 0; i < nsegments; i++) {
 165     const ZPhysicalMemorySegment segment = pmem.segment(i);
 166     const size_t size = segment.size();
 167     const void* const res = mmap((void*)addr, size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _file.fd(), segment.start());
 168     if (res == MAP_FAILED) {
 169       ZErrno err;
 170       map_failed(err);
 171     }
 172 
 173     // Advise on use of transparent huge pages before touching it
 174     if (ZLargePages::is_transparent()) {
 175       advise_view(addr, size);
 176     }
 177 
 178     // NUMA interleave memory before touching it
 179     ZNUMA::memory_interleave(addr, size);
 180 
 181     if (pretouch) {
 182       pretouch_view(addr, size);
 183     }
 184 
 185     addr += size;
 186   }
 187 }
 188 
 189 void ZPhysicalMemoryBacking::unmap_view(ZPhysicalMemory pmem, uintptr_t addr) const {
 190   // Note that we must keep the address space reservation intact and just detach
 191   // the backing memory. For this reason we map a new anonymous, non-accessible
 192   // and non-reserved page over the mapping instead of actually unmapping.
 193   const size_t size = pmem.size();
 194   const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
 195   if (res == MAP_FAILED) {
 196     ZErrno err;
 197     map_failed(err);
 198   }
 199 }
 200 
// Returns the virtual address that Native Memory Tracking should account for
// the given heap offset.
uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
  // From an NMT point of view we treat the first heap mapping (marked0) as committed
  return ZAddress::marked0(offset);
}
 205 
 206 void ZPhysicalMemoryBacking::map(ZPhysicalMemory pmem, uintptr_t offset) const {
 207   if (ZUnmapBadViews) {
 208     // Only map the good view, for debugging only
 209     map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
 210   } else {
 211     // Map all views
 212     map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
 213     map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
 214     map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
 215   }
 216 }
 217 
 218 void ZPhysicalMemoryBacking::unmap(ZPhysicalMemory pmem, uintptr_t offset) const {
 219   if (ZUnmapBadViews) {
 220     // Only map the good view, for debugging only
 221     unmap_view(pmem, ZAddress::good(offset));
 222   } else {
 223     // Unmap all views
 224     unmap_view(pmem, ZAddress::marked0(offset));
 225     unmap_view(pmem, ZAddress::marked1(offset));
 226     unmap_view(pmem, ZAddress::remapped(offset));
 227   }
 228 }
 229 
 230 void ZPhysicalMemoryBacking::flip(ZPhysicalMemory pmem, uintptr_t offset) const {
 231   assert(ZUnmapBadViews, "Should be enabled");
 232   const uintptr_t addr_good = ZAddress::good(offset);
 233   const uintptr_t addr_bad = ZAddress::is_marked(ZAddressGoodMask) ? ZAddress::remapped(offset) : ZAddress::marked(offset);
 234   // Map/Unmap views
 235   map_view(pmem, addr_good, false /* pretouch */);
 236   unmap_view(pmem, addr_bad);
 237 }