/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

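// A ZPhysicalMemory is an ordered sequence of disjoint physical memory
// segments. Segments are kept sorted by start address, and a segment that
// is contiguous with the previously added segment is merged into it (see
// add_segment()), keeping the segment array as small as possible.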
ZPhysicalMemory::ZPhysicalMemory() :
    _nsegments(0),
    _segments(NULL) {}

ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
    _nsegments(0),
    _segments(NULL) {
  add_segment(segment);
}

ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
    _nsegments(0),
    _segments(NULL) {

  // Copy segments
  for (size_t i = 0; i < pmem.nsegments(); i++) {
    add_segment(pmem.segment(i));
  }
}

const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
  // Guard against self-assignment, which would otherwise free
  // the segments before copying them
  if (&pmem == this) {
    return *this;
  }

  // Free segments
  delete [] _segments;
  _segments = NULL;
  _nsegments = 0;

  // Copy segments
  for (size_t i = 0; i < pmem.nsegments(); i++) {
    add_segment(pmem.segment(i));
  }

  return *this;
}

ZPhysicalMemory::~ZPhysicalMemory() {
  delete [] _segments;
  _segments = NULL;
  _nsegments = 0;
}

size_t ZPhysicalMemory::size() const {
  size_t size = 0;

  for (size_t i = 0; i < _nsegments; i++) {
    size += _segments[i].size();
  }

  return size;
}

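// Adds a segment to the end of the segment array. Segments must be added in
// address order. A segment that starts where the last segment ends is merged
// with it rather than stored separately. For example, adding [0x0000, 0x1000)
// followed by [0x1000, 0x2000) results in the single segment [0x0000, 0x2000).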
void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
  // Try merge with last segment
  if (_nsegments > 0) {
    ZPhysicalMemorySegment& last = _segments[_nsegments - 1];
    assert(last.end() <= segment.start(), "Segments added out of order");
    if (last.end() == segment.start()) {
      last = ZPhysicalMemorySegment(last.start(), last.size() + segment.size());
      return;
    }
  }

  // Resize array
  ZPhysicalMemorySegment* const old_segments = _segments;
  _segments = new ZPhysicalMemorySegment[_nsegments + 1];
  for (size_t i = 0; i < _nsegments; i++) {
    _segments[i] = old_segments[i];
  }
  delete [] old_segments;

  // Add new segment
  _segments[_nsegments] = segment;
  _nsegments++;
}

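// Splits off the first 'size' bytes into a new ZPhysicalMemory, removing them
// from this one. A segment straddling the split point is divided in two. For
// example, splitting 0x3000 bytes off [0x0000, 0x2000) + [0x4000, 0x6000)
// returns [0x0000, 0x2000) + [0x4000, 0x5000) and leaves [0x5000, 0x6000)
// behind.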
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  size_t nsegments = 0;

  for (size_t i = 0; i < _nsegments; i++) {
    const ZPhysicalMemorySegment& segment = _segments[i];
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
        // Transfer segment
        pmem.add_segment(segment);
      } else {
        // Split segment
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size));
        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size);
      }
    } else {
      // Keep segment
      _segments[nsegments++] = segment;
    }
  }

  _nsegments = nsegments;

  return pmem;
}

bool ZPhysicalMemoryManager::is_initialized() const {
  return _backing.is_initialized();
}

void ZPhysicalMemoryManager::warn_commit_limits(size_t max) const {
  _backing.warn_commit_limits(max);
}

bool ZPhysicalMemoryManager::supports_uncommit() {
  assert(!is_init_completed(), "Invalid state");
  assert(_backing.size() >= ZGranuleSize, "Invalid size");

  // Test if uncommit is supported by uncommitting and then re-committing a granule
  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
}

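// The heap is multi-mapped, so the same physical memory appears at several
// virtual addresses (heap views). To avoid over-reporting committed memory
// to Native Memory Tracking (NMT), only a single view is recorded.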
void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // From an NMT point of view we treat the first heap view (marked0) as committed
  const uintptr_t addr = ZAddress::marked0(offset);
  const size_t size = pmem.size();
  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}

void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (MemTracker::tracking_level() > NMT_minimal) {
    const uintptr_t addr = ZAddress::marked0(offset);
    const size_t size = pmem.size();
    Tracker tracker(Tracker::uncommit);
    tracker.record((address)addr, size);
  }
}

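// Commits 'size' bytes of physical memory. Holes previously punched by
// uncommit() are filled first, front to back; only then is the backing
// memory expanded at the end. Returns the number of bytes actually
// committed, which may be less than requested if the backing store
// cannot satisfy the full request.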
size_t ZPhysicalMemoryManager::commit(size_t size) {
  size_t committed = 0;

  // Fill holes in the backing memory
  while (committed < size) {
    size_t allocated = 0;
    const size_t remaining = size - committed;
    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
    if (start == UINTPTR_MAX) {
      // No holes to commit
      break;
    }

    // Try commit hole
    const size_t filled = _backing.commit(start, allocated);
    if (filled > 0) {
      // Successful or partially successful
      _committed.free(start, filled);
      committed += filled;
    }
    if (filled < allocated) {
      // Failed or partially failed
      _uncommitted.free(start + filled, allocated - filled);
      return committed;
    }
  }

  // Expand backing memory
  if (committed < size) {
    const size_t remaining = size - committed;
    const uintptr_t start = _backing.size();
    const size_t expanded = _backing.commit(start, remaining);
    if (expanded > 0) {
      // Successful or partially successful
      _committed.free(start, expanded);
      committed += expanded;
    }
  }

  return committed;
}

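// Uncommits up to 'size' bytes of physical memory by punching holes in the
// backing memory, starting from the back of the committed range. Returns
// the number of bytes actually uncommitted.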
size_t ZPhysicalMemoryManager::uncommit(size_t size) {
  size_t uncommitted = 0;

  // Punch holes in backing memory
  while (uncommitted < size) {
    size_t allocated = 0;
    const size_t remaining = size - uncommitted;
    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
    assert(start != UINTPTR_MAX, "Allocation should never fail");

    // Try punch hole
    const size_t punched = _backing.uncommit(start, allocated);
    if (punched > 0) {
      // Successful or partially successful
      _uncommitted.free(start, punched);
      uncommitted += punched;
    }
    if (punched < allocated) {
      // Failed or partially failed
      _committed.free(start + punched, allocated - punched);
      return uncommitted;
    }
  }

  return uncommitted;
}

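// Allocates 'size' bytes of committed physical memory, one granule at a
// time. Contiguous granules are merged by add_segment(), so a single large
// segment is returned whenever the allocations happen to be adjacent.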
ZPhysicalMemory ZPhysicalMemoryManager::alloc(size_t size) {
  assert(is_aligned(size, ZGranuleSize), "Invalid size");

  ZPhysicalMemory pmem;

  // Allocate segments
  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
    assert(start != UINTPTR_MAX, "Allocation should never fail");
    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
  }

  return pmem;
}

void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
  const size_t nsegments = pmem.nsegments();

  // Free segments
  for (size_t i = 0; i < nsegments; i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    _committed.free(segment.start(), segment.size());
  }
}

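// Pre-touches a single view, one page at a time. The touch stride matches
// the backing page size: the granule size when explicit large pages are
// used, otherwise the normal VM page size.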
void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
  const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}

void ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  const size_t nsegments = pmem.nsegments();
  size_t size = 0;

  // Map segments
  for (size_t i = 0; i < nsegments; i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    _backing.map(addr + size, segment.size(), segment.start());
    size += segment.size();
  }

  // Setup NUMA interleaving for large pages
  if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
    // To get granule-level NUMA interleaving when using large pages,
    // we simply let the kernel interleave the memory for us at page
    // fault time.
    os::numa_make_global((char*)addr, size);
  }
}

void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  _backing.unmap(addr, pmem.size());
}

void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
  if (ZVerifyViews) {
    // Pre-touch good view
    pretouch_view(ZAddress::good(offset), size);
  } else {
    // Pre-touch all views
    pretouch_view(ZAddress::marked0(offset), size);
    pretouch_view(ZAddress::marked1(offset), size);
    pretouch_view(ZAddress::remapped(offset), size);
  }
}

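// Maps the physical memory at the heap views for the given offset. In normal
// operation the memory is multi-mapped at all three views (marked0, marked1
// and remapped), so every colored pointer resolves to the same physical
// memory. With -XX:+ZVerifyViews only the current good view is mapped, and
// the other views are mapped and unmapped on demand via debug_map() and
// debug_unmap().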
void ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (ZVerifyViews) {
    // Map good view
    map_view(pmem, ZAddress::good(offset));
  } else {
    // Map all views
    map_view(pmem, ZAddress::marked0(offset));
    map_view(pmem, ZAddress::marked1(offset));
    map_view(pmem, ZAddress::remapped(offset));
  }

  nmt_commit(pmem, offset);
}

void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  nmt_uncommit(pmem, offset);

  if (ZVerifyViews) {
    // Unmap good view
    unmap_view(pmem, ZAddress::good(offset));
  } else {
    // Unmap all views
    unmap_view(pmem, ZAddress::marked0(offset));
    unmap_view(pmem, ZAddress::marked1(offset));
    unmap_view(pmem, ZAddress::remapped(offset));
  }
}

void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Map good view
  assert(ZVerifyViews, "Should be enabled");
  map_view(pmem, ZAddress::good(offset));
}

void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Unmap good view
  assert(ZVerifyViews, "Should be enabled");
  unmap_view(pmem, ZAddress::good(offset));
}