1 /*
   2  * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/gcLogPrecious.hpp"
  26 #include "gc/z/zAddress.inline.hpp"
  27 #include "gc/z/zArray.inline.hpp"
  28 #include "gc/z/zGlobals.hpp"
  29 #include "gc/z/zLargePages.inline.hpp"
  30 #include "gc/z/zNUMA.inline.hpp"
  31 #include "gc/z/zPhysicalMemory.inline.hpp"
  32 #include "logging/log.hpp"
  33 #include "runtime/globals.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/init.hpp"
  36 #include "runtime/os.hpp"
  37 #include "services/memTracker.hpp"
  38 #include "utilities/align.hpp"
  39 #include "utilities/debug.hpp"
  40 #include "utilities/globalDefinitions.hpp"
  41 #include "utilities/powerOfTwo.hpp"
  42 
// Creates an empty physical memory object with no segments.
ZPhysicalMemory::ZPhysicalMemory() :
    _segments() {}
  45 
// Creates a physical memory object containing a single segment.
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
    _segments() {
  add_segment(segment);
}
  50 
// Copy constructor. Copies all segments from the given physical memory
// object, merging adjacent segments where possible (see add_segment()).
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
    _segments() {
  add_segments(pmem);
}
  55 
  56 const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
  57   // Free segments
  58   _segments.clear_and_deallocate();
  59 
  60   // Copy segments
  61   add_segments(pmem);
  62 
  63   return *this;
  64 }
  65 
  66 size_t ZPhysicalMemory::size() const {
  67   size_t size = 0;
  68 
  69   for (int i = 0; i < _segments.length(); i++) {
  70     size += _segments.at(i).size();
  71   }
  72 
  73   return size;
  74 }
  75 
// Inserts a new segment at the given position in the segment array.
// Note: may reallocate the array's backing storage.
void ZPhysicalMemory::insert_segment(int index, uintptr_t start, size_t size, bool committed) {
  _segments.insert_before(index, ZPhysicalMemorySegment(start, size, committed));
}
  79 
// Replaces the segment at the given position with a new segment.
void ZPhysicalMemory::replace_segment(int index, uintptr_t start, size_t size, bool committed) {
  _segments.at_put(index, ZPhysicalMemorySegment(start, size, committed));
}
  83 
// Removes the segment at the given position from the segment array.
void ZPhysicalMemory::remove_segment(int index) {
  _segments.remove_at(index);
}
  87 
  88 void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
  89   for (int i = 0; i < pmem.nsegments(); i++) {
  90     add_segment(pmem.segment(i));
  91   }
  92 }
  93 
// Removes all segments and deallocates the segment array's storage.
void ZPhysicalMemory::remove_segments() {
  _segments.clear_and_deallocate();
}
  97 
  98 static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
  99   return before.end() == after.start() && before.is_committed() == after.is_committed();
 100 }
 101 
 102 void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
 103   // Insert segments in address order, merge segments when possible
 104   for (int i = _segments.length(); i > 0; i--) {
 105     const int current = i - 1;
 106 
 107     if (_segments.at(current).end() <= segment.start()) {
 108       if (is_mergable(_segments.at(current), segment)) {
 109         if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
 110           // Merge with end of current segment and start of next segment
 111           const size_t start = _segments.at(current).start();
 112           const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size();
 113           replace_segment(current, start, size, segment.is_committed());
 114           remove_segment(current + 1);
 115           return;
 116         }
 117 
 118         // Merge with end of current segment
 119         const size_t start = _segments.at(current).start();
 120         const size_t size = _segments.at(current).size() + segment.size();
 121         replace_segment(current, start, size, segment.is_committed());
 122         return;
 123       } else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
 124         // Merge with start of next segment
 125         const size_t start = segment.start();
 126         const size_t size = segment.size() + _segments.at(current + 1).size();
 127         replace_segment(current + 1, start, size, segment.is_committed());
 128         return;
 129       }
 130 
 131       // Insert after current segment
 132       insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
 133       return;
 134     }
 135   }
 136 
 137   if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) {
 138     // Merge with start of first segment
 139     const size_t start = segment.start();
 140     const size_t size = segment.size() + _segments.at(0).size();
 141     replace_segment(0, start, size, segment.is_committed());
 142     return;
 143   }
 144 
 145   // Insert before first segment
 146   insert_segment(0, segment.start(), segment.size(), segment.is_committed());
 147 }
 148 
 149 bool ZPhysicalMemory::commit_segment(int index, size_t size) {
 150   ZPhysicalMemorySegment& segment = _segments.at(index);
 151 
 152   assert(size <= segment.size(), "Invalid size");
 153   assert(!segment.is_committed(), "Invalid state");
 154 
 155   if (size == segment.size()) {
 156     // Completely committed
 157     segment.set_committed(true);
 158     return true;
 159   }
 160 
 161   if (size > 0) {
 162     // Partially committed, split segment
 163     insert_segment(index + 1, segment.start() + size, segment.size() - size, false /* committed */);
 164     replace_segment(index, segment.start(), size, true /* committed */);
 165   }
 166 
 167   return false;
 168 }
 169 
 170 bool ZPhysicalMemory::uncommit_segment(int index, size_t size) {
 171   ZPhysicalMemorySegment& segment = _segments.at(index);
 172 
 173   assert(size <= segment.size(), "Invalid size");
 174   assert(segment.is_committed(), "Invalid state");
 175 
 176   if (size == segment.size()) {
 177     // Completely uncommitted
 178     segment.set_committed(false);
 179     return true;
 180   }
 181 
 182   if (size > 0) {
 183     // Partially uncommitted, split segment
 184     insert_segment(index + 1, segment.start() + size, segment.size() - size, true /* committed */);
 185     replace_segment(index, segment.start(), size, false /* committed */);
 186   }
 187 
 188   return false;
 189 }
 190 
// Splits off the first (up to) 'size' bytes into a new ZPhysicalMemory
// object, removing them from this one. A segment straddling the split
// point is split in two. The remaining segments are compacted in place.
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  int nsegments = 0; // Number of segments kept in this object

  for (int i = 0; i < _segments.length(); i++) {
    const ZPhysicalMemorySegment& segment = _segments.at(i);
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
        // Transfer segment
        pmem.add_segment(segment);
      } else {
        // Split segment
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
        _segments.at_put(nsegments++, ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed()));
      }
    } else {
      // Keep segment
      _segments.at_put(nsegments++, segment);
    }
  }

  // Drop slots left over from transferred segments
  _segments.trunc_to(nsegments);

  return pmem;
}
 217 
 218 ZPhysicalMemory ZPhysicalMemory::split_committed() {
 219   ZPhysicalMemory pmem;
 220   int nsegments = 0;
 221 
 222   for (int i = 0; i < _segments.length(); i++) {
 223     const ZPhysicalMemorySegment& segment = _segments.at(i);
 224     if (segment.is_committed()) {
 225       // Transfer segment
 226       pmem.add_segment(segment);
 227     } else {
 228       // Keep segment
 229       _segments.at_put(nsegments++, segment);
 230     }
 231   }
 232 
 233   _segments.trunc_to(nsegments);
 234 
 235   return pmem;
 236 }
 237 
// Creates a physical memory manager with the given maximum capacity,
// backed by platform-specific physical memory.
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity) {
  // Make the whole range free
  _manager.free(0, max_capacity);
}
 243 
// Returns true if the platform backing was successfully initialized.
bool ZPhysicalMemoryManager::is_initialized() const {
  return _backing.is_initialized();
}
 247 
// Delegates to the platform backing to warn if platform commit limits
// could prevent committing max_capacity bytes.
void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
  _backing.warn_commit_limits(max_capacity);
}
 251 
// Decides whether uncommit of unused heap memory should be enabled, and
// updates the ZUncommit flag ergonomically when it must be disabled.
// Must run before VM initialization has completed.
void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
  assert(!is_init_completed(), "Invalid state");

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then uncommit
  // will be enabled.
  if (!ZUncommit) {
    log_info_p(gc, init)("Uncommit: Disabled");
    return;
  }

  if (max_capacity == min_capacity) {
    // Nothing could ever be uncommitted below min capacity
    log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  // Test if uncommit is supported by the operating system by committing
  // and then uncommitting a granule.
  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */));
  if (!commit(pmem) || !uncommit(pmem)) {
    log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  log_info_p(gc, init)("Uncommit: Enabled");
  log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
}
 281 
// Registers the committed memory with NMT (Native Memory Tracking).
void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
  // From an NMT point of view we treat the first heap view (marked0) as committed
  const uintptr_t addr = ZAddress::marked0(offset);
  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
 287 
// Registers the uncommitted memory with NMT. Only done when NMT
// tracking is enabled beyond the minimal level.
void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
  if (MemTracker::tracking_level() > NMT_minimal) {
    // Use the same (marked0) view that nmt_commit() registered
    const uintptr_t addr = ZAddress::marked0(offset);
    Tracker tracker(Tracker::uncommit);
    tracker.record((address)addr, size);
  }
}
 295 
 296 void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
 297   assert(is_aligned(size, ZGranuleSize), "Invalid size");
 298 
 299   // Allocate segments
 300   while (size > 0) {
 301     size_t allocated = 0;
 302     const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
 303     assert(start != UINTPTR_MAX, "Allocation should never fail");
 304     pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
 305     size -= allocated;
 306   }
 307 }
 308 
 309 void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
 310   // Free segments
 311   for (int i = 0; i < pmem.nsegments(); i++) {
 312     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 313     _manager.free(segment.start(), segment.size());
 314   }
 315 }
 316 
 317 bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
 318   // Commit segments
 319   for (int i = 0; i < pmem.nsegments(); i++) {
 320     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 321     if (segment.is_committed()) {
 322       // Segment already committed
 323       continue;
 324     }
 325 
 326     // Commit segment
 327     const size_t committed = _backing.commit(segment.start(), segment.size());
 328     if (!pmem.commit_segment(i, committed)) {
 329       // Failed or partially failed
 330       return false;
 331     }
 332   }
 333 
 334   // Success
 335   return true;
 336 }
 337 
// Uncommits all committed segments of the given physical memory.
// Returns true if everything was uncommitted, false on (partial) failure.
bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
  // Uncommit segments
  for (int i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (!segment.is_committed()) {
      // Segment already uncommitted
      continue;
    }

    // Uncommit segment
    const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
    if (!pmem.uncommit_segment(i, uncommitted)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}
 358 
// Pre-touches the given address range to fault in the backing pages.
// Uses granule-sized steps when explicit large pages are in use.
void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
  const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
}
 363 
// Maps all segments of the physical memory contiguously starting at the
// given virtual address, and sets up NUMA interleaving when applicable.
void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const {
  size_t size = 0; // Running offset from addr

  // Map segments
  for (int i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    _backing.map(addr + size, segment.size(), segment.start());
    size += segment.size();
  }

  // Setup NUMA interleaving for large pages
  if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
    // To get granule-level NUMA interleaving when using large pages,
    // we simply let the kernel interleave the memory for us at page
    // fault time.
    os::numa_make_global((char*)addr, size);
  }
}
 382 
// Unmaps a single view of physical memory from the given address range.
void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
  _backing.unmap(addr, size);
}
 386 
 387 void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
 388   if (ZVerifyViews) {
 389     // Pre-touch good view
 390     pretouch_view(ZAddress::good(offset), size);
 391   } else {
 392     // Pre-touch all views
 393     pretouch_view(ZAddress::marked0(offset), size);
 394     pretouch_view(ZAddress::marked1(offset), size);
 395     pretouch_view(ZAddress::remapped(offset), size);
 396   }
 397 }
 398 
// Maps the physical memory at the given heap offset, in all heap views
// (or only the good view when ZVerifyViews is enabled), and registers
// the memory as committed with NMT.
void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
  const size_t size = pmem.size();

  if (ZVerifyViews) {
    // Map good view
    map_view(ZAddress::good(offset), pmem);
  } else {
    // Map all views
    map_view(ZAddress::marked0(offset), pmem);
    map_view(ZAddress::marked1(offset), pmem);
    map_view(ZAddress::remapped(offset), pmem);
  }

  nmt_commit(offset, size);
}
 414 
// Unmaps the physical memory at the given heap offset, in all heap views
// (or only the good view when ZVerifyViews is enabled), and registers
// the memory as uncommitted with NMT.
void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
  nmt_uncommit(offset, size);

  if (ZVerifyViews) {
    // Unmap good view
    unmap_view(ZAddress::good(offset), size);
  } else {
    // Unmap all views
    unmap_view(ZAddress::marked0(offset), size);
    unmap_view(ZAddress::marked1(offset), size);
    unmap_view(ZAddress::remapped(offset), size);
  }
}
 428 
// Maps the physical memory in the good view only. For use when
// ZVerifyViews is enabled.
void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
  // Map good view
  assert(ZVerifyViews, "Should be enabled");
  map_view(ZAddress::good(offset), pmem);
}
 434 
// Unmaps the physical memory from the good view only. For use when
// ZVerifyViews is enabled.
void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
  // Unmap good view
  assert(ZVerifyViews, "Should be enabled");
  unmap_view(ZAddress::good(offset), size);
}