1 /*
   2  * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/z/zAddress.inline.hpp"
  26 #include "gc/z/zGlobals.hpp"
  27 #include "gc/z/zLargePages.inline.hpp"
  28 #include "gc/z/zNUMA.inline.hpp"
  29 #include "gc/z/zPhysicalMemory.inline.hpp"
  30 #include "runtime/init.hpp"
  31 #include "runtime/os.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 #include "utilities/debug.hpp"
  35 #include "utilities/globalDefinitions.hpp"
  36 
// Creates an empty physical memory object with no segments.
ZPhysicalMemory::ZPhysicalMemory() :
    _nsegments(0),
    _segments(NULL) {}
  40 
// Creates a physical memory object containing a single segment.
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
    _nsegments(0),
    _segments(NULL) {
  add_segment(segment);
}
  46 
  47 ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
  48     _nsegments(0),
  49     _segments(NULL) {
  50 
  51   // Copy segments
  52   for (size_t i = 0; i < pmem.nsegments(); i++) {
  53     add_segment(pmem.segment(i));
  54   }
  55 }
  56 
  57 const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
  58   // Free segments
  59   delete [] _segments;
  60   _segments = NULL;
  61   _nsegments = 0;
  62 
  63   // Copy segments
  64   for (size_t i = 0; i < pmem.nsegments(); i++) {
  65     add_segment(pmem.segment(i));
  66   }
  67 
  68   return *this;
  69 }
  70 
// Destructor. Frees the segment array and resets the object to its
// empty state.
ZPhysicalMemory::~ZPhysicalMemory() {
  delete [] _segments;
  _segments = NULL;
  _nsegments = 0;
}
  76 
  77 size_t ZPhysicalMemory::size() const {
  78   size_t size = 0;
  79 
  80   for (size_t i = 0; i < _nsegments; i++) {
  81     size += _segments[i].size();
  82   }
  83 
  84   return size;
  85 }
  86 
  87 void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
  88   // Try merge with last segment
  89   if (_nsegments > 0) {
  90     ZPhysicalMemorySegment& last = _segments[_nsegments - 1];
  91     assert(last.end() <= segment.start(), "Segments added out of order");
  92     if (last.end() == segment.start()) {
  93       last = ZPhysicalMemorySegment(last.start(), last.size() + segment.size());
  94       return;
  95     }
  96   }
  97 
  98   // Resize array
  99   ZPhysicalMemorySegment* const old_segments = _segments;
 100   _segments = new ZPhysicalMemorySegment[_nsegments + 1];
 101   for (size_t i = 0; i < _nsegments; i++) {
 102     _segments[i] = old_segments[i];
 103   }
 104   delete [] old_segments;
 105 
 106   // Add new segment
 107   _segments[_nsegments] = segment;
 108   _nsegments++;
 109 }
 110 
// Splits off the first 'size' bytes of this physical memory and returns
// them as a new ZPhysicalMemory object. The remainder stays in this
// object. A segment straddling the split point is divided in two. The
// remaining segments are compacted in place, reusing the existing array
// (nsegments tracks the write position, which never passes the read
// position i).
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  size_t nsegments = 0;

  for (size_t i = 0; i < _nsegments; i++) {
    const ZPhysicalMemorySegment& segment = _segments[i];
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
        // Transfer segment
        pmem.add_segment(segment);
      } else {
        // Split segment
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size));
        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size);
      }
    } else {
      // Keep segment
      _segments[nsegments++] = segment;
    }
  }

  _nsegments = nsegments;

  return pmem;
}
 137 
// Creates a physical memory manager with the given maximum capacity,
// backed by the platform-specific backing memory implementation.
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity) {
  // Register everything as uncommitted
  _uncommitted.free(0, max_capacity);
}
 143 
// Returns true if the backing memory was successfully initialized.
bool ZPhysicalMemoryManager::is_initialized() const {
  return _backing.is_initialized();
}
 147 
// Delegates to the backing memory to warn about platform commit limits
// that could prevent committing 'max' bytes.
void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const {
  _backing.warn_commit_limits(max);
}
 151 
// Probes whether uncommitting memory works on this system by actually
// uncommitting one granule and then re-committing it. Returns true only
// if the full granule could be re-committed. Must run before VM
// initialization completes.
bool ZPhysicalMemoryManager::supports_uncommit() {
  assert(!is_init_completed(), "Invalid state");

  // Test if uncommit is supported by uncommitting and then re-committing a granule
  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
}
 158 
// Records a virtual memory commit of pmem's total size with Native
// Memory Tracking (NMT), at the address of the first heap view.
void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // From an NMT point of view we treat the first heap view (marked0) as committed
  const uintptr_t addr = ZAddress::marked0(offset);
  const size_t size = pmem.size();
  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
 165 
// Records a virtual memory uncommit with Native Memory Tracking (NMT),
// using the same (marked0) view address that nmt_commit() recorded.
void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Only record when NMT tracking is above the minimal level
  if (MemTracker::tracking_level() > NMT_minimal) {
    const uintptr_t addr = ZAddress::marked0(offset);
    const size_t size = pmem.size();
    Tracker tracker(Tracker::uncommit);
    tracker.record((address)addr, size);
  }
}
 174 
// Commits up to 'size' bytes of backing memory by filling holes
// (uncommitted ranges). Returns the number of bytes actually committed,
// which may be less than requested if the backing commit fails
// (partially or completely) or no uncommitted memory remains.
size_t ZPhysicalMemoryManager::commit(size_t size) {
  size_t committed = 0;

  // Fill holes in the backing memory
  while (committed < size) {
    size_t allocated = 0;
    const size_t remaining = size - committed;
    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
    if (start == UINTPTR_MAX) {
      // No holes to commit
      break;
    }

    // Try commit hole
    const size_t filled = _backing.commit(start, allocated);
    if (filled > 0) {
      // Successful or partially successful; move the committed part
      // into the committed pool
      _committed.free(start, filled);
      committed += filled;
    }
    if (filled < allocated) {
      // Failed or partially failed; return the uncommitted tail to the
      // uncommitted pool and give up
      _uncommitted.free(start + filled, allocated - filled);
      return committed;
    }
  }

  return committed;
}
 204 
// Uncommits up to 'size' bytes of backing memory by punching holes in
// committed ranges, taken from the back of the committed pool. Returns
// the number of bytes actually uncommitted, which may be less than
// requested if the backing uncommit fails partially or completely.
size_t ZPhysicalMemoryManager::uncommit(size_t size) {
  size_t uncommitted = 0;

  // Punch holes in backing memory
  while (uncommitted < size) {
    size_t allocated = 0;
    const size_t remaining = size - uncommitted;
    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
    assert(start != UINTPTR_MAX, "Allocation should never fail");

    // Try punch hole
    const size_t punched = _backing.uncommit(start, allocated);
    if (punched > 0) {
      // Successful or partially successful; move the uncommitted part
      // into the uncommitted pool
      _uncommitted.free(start, punched);
      uncommitted += punched;
    }
    if (punched < allocated) {
      // Failed or partially failed; return the still-committed tail to
      // the committed pool and give up
      _committed.free(start + punched, allocated - punched);
      return uncommitted;
    }
  }

  return uncommitted;
}
 231 
 232 ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
 233   assert(is_aligned(size, ZGranuleSize), "Invalid size");
 234 
 235   ZPhysicalMemory pmem;
 236 
 237   // Allocate segments
 238   for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
 239     const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
 240     assert(start != UINTPTR_MAX, "Allocation should never fail");
 241     pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
 242   }
 243 
 244   return pmem;
 245 }
 246 
 247 void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
 248   const size_t nsegments = pmem.nsegments();
 249 
 250   // Free segments
 251   for (size_t i = 0; i < nsegments; i++) {
 252     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 253     _committed.free(segment.start(), segment.size());
 254   }
 255 }
 256 
// Pre-touches (pre-faults) the virtual address range [addr, addr + size).
// With explicit large pages the touch stride is one granule, otherwise
// one small OS page.
void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
  const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
 261 
// Maps the physical segments of 'pmem' contiguously into the virtual
// address range starting at 'addr'.
void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  const size_t nsegments = pmem.nsegments();
  size_t size = 0;

  // Map segments back-to-back; 'size' tracks the running virtual offset
  for (size_t i = 0; i < nsegments; i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    _backing.map(addr + size, segment.size(), segment.start());
    size += segment.size();
  }

  // Setup NUMA interleaving for large pages
  if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
    // To get granule-level NUMA interleaving when using large pages,
    // we simply let the kernel interleave the memory for us at page
    // fault time.
    os::numa_make_global((char*)addr, size);
  }
}
 281 
// Unmaps the virtual address range at 'addr' covering pmem's total size.
void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  _backing.unmap(addr, pmem.size());
}
 285 
// Pre-touches the heap memory at the given offset. With ZVerifyViews
// only the good view is mapped at a time, so only it is touched;
// otherwise all three heap views are touched.
void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
  if (ZVerifyViews) {
    // Pre-touch good view
    pretouch_view(ZAddress::good(offset), size);
  } else {
    // Pre-touch all views
    pretouch_view(ZAddress::marked0(offset), size);
    pretouch_view(ZAddress::marked1(offset), size);
    pretouch_view(ZAddress::remapped(offset), size);
  }
}
 297 
// Maps the given physical memory at the given heap offset, and records
// the commit with NMT. With ZVerifyViews only the good view is mapped;
// otherwise all three heap views are mapped.
void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (ZVerifyViews) {
    // Map good view
    map_view(pmem, ZAddress::good(offset));
  } else {
    // Map all views
    map_view(pmem, ZAddress::marked0(offset));
    map_view(pmem, ZAddress::marked1(offset));
    map_view(pmem, ZAddress::remapped(offset));
  }

  nmt_commit(pmem, offset);
}
 311 
// Unmaps the given physical memory from the given heap offset, after
// recording the uncommit with NMT. Mirrors map().
void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  nmt_uncommit(pmem, offset);

  if (ZVerifyViews) {
    // Unmap good view
    unmap_view(pmem, ZAddress::good(offset));
  } else {
    // Unmap all views
    unmap_view(pmem, ZAddress::marked0(offset));
    unmap_view(pmem, ZAddress::marked1(offset));
    unmap_view(pmem, ZAddress::remapped(offset));
  }
}
 325 
// Maps only the good view for verification purposes. Only valid when
// ZVerifyViews is enabled.
void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Map good view
  assert(ZVerifyViews, "Should be enabled");
  map_view(pmem, ZAddress::good(offset));
}
 331 
// Unmaps only the good view for verification purposes. Only valid when
// ZVerifyViews is enabled.
void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Unmap good view
  assert(ZVerifyViews, "Should be enabled");
  unmap_view(pmem, ZAddress::good(offset));
}