1 /*
   2  * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/z/zAddress.inline.hpp"
  26 #include "gc/z/zGlobals.hpp"
  27 #include "gc/z/zLargePages.inline.hpp"
  28 #include "gc/z/zNUMA.inline.hpp"
  29 #include "gc/z/zPhysicalMemory.inline.hpp"
  30 #include "logging/log.hpp"
  31 #include "runtime/globals.hpp"
  32 #include "runtime/init.hpp"
  33 #include "runtime/os.hpp"
  34 #include "services/memTracker.hpp"
  35 #include "utilities/align.hpp"
  36 #include "utilities/debug.hpp"
  37 #include "utilities/globalDefinitions.hpp"
  38 #include "utilities/powerOfTwo.hpp"
  39 
// Create an empty physical memory descriptor: no segments and no
// backing segment array allocated yet (allocated lazily by insert_segment()).
ZPhysicalMemory::ZPhysicalMemory() :
    _nsegments_max(0),
    _nsegments(0),
    _segments(NULL) {}
  44 
// Create a physical memory descriptor holding a single segment.
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
    _nsegments_max(0),
    _nsegments(0),
    _segments(NULL) {
  add_segment(segment);
}
  51 
// Copy constructor: deep-copies all segments from pmem.
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
    _nsegments_max(0),
    _nsegments(0),
    _segments(NULL) {
  add_segments(pmem);
}
  58 
  59 const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
  60   // Free segments
  61   delete [] _segments;
  62   _segments = NULL;
  63   _nsegments_max = 0;
  64   _nsegments = 0;
  65 
  66   // Copy segments
  67   add_segments(pmem);
  68 
  69   return *this;
  70 }
  71 
// Release the segment array (safe on NULL).
ZPhysicalMemory::~ZPhysicalMemory() {
  delete [] _segments;
}
  75 
  76 size_t ZPhysicalMemory::size() const {
  77   size_t size = 0;
  78 
  79   for (uint32_t i = 0; i < _nsegments; i++) {
  80     size += _segments[i].size();
  81   }
  82 
  83   return size;
  84 }
  85 
// Insert a new segment at 'index' in the address-ordered segment array,
// shifting later segments up one slot. Grows the backing array to the next
// power of two when full, so repeated insertions amortize the reallocations.
void ZPhysicalMemory::insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
  assert(index <= _nsegments, "Invalid index");

  // Remember the current array; it is either reused in place or replaced below
  ZPhysicalMemorySegment* const from_segments = _segments;

  if (_nsegments + 1 > _nsegments_max) {
    // Resize array
    _nsegments_max = round_up_power_of_2(_nsegments_max + 1);
    _segments = new ZPhysicalMemorySegment[_nsegments_max];

    // Copy segments before index
    for (uint32_t i = 0; i < index; i++) {
      _segments[i] = from_segments[i];
    }
  }

  // Copy/Move segments after index. Iterating backwards makes the in-place
  // (no-resize) case safe: each slot is filled before it is read as a source.
  for (uint32_t i = _nsegments; i > index; i--) {
    _segments[i] = from_segments[i - 1];
  }

  // Insert new segment
  _segments[index] = ZPhysicalMemorySegment(start, size, committed);
  _nsegments++;

  // Delete old array, but only if a new one was allocated above
  if (from_segments != _segments) {
    delete [] from_segments;
  }
}
 116 
 117 void ZPhysicalMemory::replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
 118   assert(index < _nsegments, "Invalid index");
 119   _segments[index] = ZPhysicalMemorySegment(start, size, committed);;
 120 }
 121 
 122 void ZPhysicalMemory::remove_segment(uint32_t index) {
 123   assert(index < _nsegments, "Invalid index");
 124 
 125   // Move segments after index
 126   for (uint32_t i = index + 1; i < _nsegments; i++) {
 127     _segments[i - 1] = _segments[i];
 128   }
 129 
 130   _nsegments--;
 131 }
 132 
 133 void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
 134   for (uint32_t i = 0; i < pmem.nsegments(); i++) {
 135     add_segment(pmem.segment(i));
 136   }
 137 }
 138 
// Move all segments out of pmem into this descriptor: copies them over,
// then resets pmem to the empty state.
void ZPhysicalMemory::transfer_segments(ZPhysicalMemory& pmem) {
  add_segments(pmem);
  pmem = ZPhysicalMemory();
}
 143 
 144 static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
 145   return before.end() == after.start() && before.is_committed() == after.is_committed();
 146 }
 147 
 148 void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
 149   // Insert segments in address order, merge segments when possible
 150   for (uint32_t i = _nsegments; i > 0; i--) {
 151     const uint32_t current = i - 1;
 152 
 153     if (_segments[current].end() <= segment.start()) {
 154       if (is_mergable(_segments[current], segment)) {
 155         if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
 156           // Merge with end of current segment and start of next segment
 157           const size_t start = _segments[current].start();
 158           const size_t size = _segments[current].size() + segment.size() + _segments[current + 1].size();
 159           replace_segment(current, start, size, segment.is_committed());
 160           remove_segment(current + 1);
 161           return;
 162         }
 163 
 164         // Merge with end of current segment
 165         const size_t start = _segments[current].start();
 166         const size_t size = _segments[current].size() + segment.size();
 167         replace_segment(current, start, size, segment.is_committed());
 168         return;
 169       } else if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
 170         // Merge with start of next segment
 171         const size_t start = segment.start();
 172         const size_t size = segment.size() + _segments[current + 1].size();
 173         replace_segment(current + 1, start, size, segment.is_committed());
 174         return;
 175       }
 176 
 177       // Insert after current segment
 178       insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
 179       return;
 180     }
 181   }
 182 
 183   if (_nsegments > 0 && is_mergable(segment, _segments[0])) {
 184     // Merge with start of first segment
 185     const size_t start = segment.start();
 186     const size_t size = segment.size() + _segments[0].size();
 187     replace_segment(0, start, size, segment.is_committed());
 188     return;
 189   }
 190 
 191   // Insert before first segment
 192   insert_segment(0, segment.start(), segment.size(), segment.is_committed());
 193 }
 194 
// Record that the first 'size' bytes of the (previously uncommitted) segment
// at 'index' are now committed. Returns true only when the whole segment was
// committed. On a partial commit the segment is split: the committed head
// stays at 'index' and the uncommitted tail is inserted right after it.
bool ZPhysicalMemory::commit_segment(uint32_t index, size_t size) {
  assert(index < _nsegments, "Invalid index");
  assert(size <= _segments[index].size(), "Invalid size");
  assert(!_segments[index].is_committed(), "Invalid state");

  if (size == _segments[index].size()) {
    // Completely committed
    _segments[index].set_committed(true);
    return true;
  }

  if (size > 0) {
    // Partially committed, split segment. Insert the tail first; the segment
    // at 'index' is unchanged by that insertion, so replace_segment() below
    // still reads the original start.
    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, false /* committed */);
    replace_segment(index, _segments[index].start(), size, true /* committed */);
  }

  return false;
}
 214 
// Record that the first 'size' bytes of the (previously committed) segment
// at 'index' are now uncommitted. Returns true only when the whole segment
// was uncommitted. On a partial uncommit the segment is split: the
// uncommitted head stays at 'index', the still-committed tail follows it.
bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) {
  assert(index < _nsegments, "Invalid index");
  assert(size <= _segments[index].size(), "Invalid size");
  assert(_segments[index].is_committed(), "Invalid state");

  if (size == _segments[index].size()) {
    // Completely uncommitted
    _segments[index].set_committed(false);
    return true;
  }

  if (size > 0) {
    // Partially uncommitted, split segment (tail inserted first, mirroring
    // commit_segment())
    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, true /* committed */);
    replace_segment(index, _segments[index].start(), size, false /* committed */);
  }

  return false;
}
 234 
// Split off and return the first 'size' bytes of this memory; the remainder
// stays in this descriptor. A segment straddling the boundary is cut in two.
// The kept segments are compacted in place while iterating, which is safe
// because the write index never passes the read index.
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  uint32_t nsegments = 0;

  for (uint32_t i = 0; i < _nsegments; i++) {
    const ZPhysicalMemorySegment& segment = _segments[i];
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
        // Transfer segment
        pmem.add_segment(segment);
      } else {
        // Split segment: head goes to pmem, tail is kept
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed());
      }
    } else {
      // Keep segment
      _segments[nsegments++] = segment;
    }
  }

  _nsegments = nsegments;

  return pmem;
}
 261 
 262 ZPhysicalMemory ZPhysicalMemory::split_committed() {
 263   ZPhysicalMemory pmem;
 264   uint32_t nsegments = 0;
 265 
 266   for (uint32_t i = 0; i < _nsegments; i++) {
 267     const ZPhysicalMemorySegment& segment = _segments[i];
 268     if (segment.is_committed()) {
 269       // Transfer segment
 270       pmem.add_segment(segment);
 271     } else {
 272       // Keep segment
 273       _segments[nsegments++] = segment;
 274     }
 275   }
 276 
 277   _nsegments = nsegments;
 278 
 279   return pmem;
 280 }
 281 
// Initialize the backing store sized for max_capacity and seed the
// offset manager with the whole [0, max_capacity) range as free.
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity) {
  // Make the whole range free
  _manager.free(0, max_capacity);
}
 287 
// True if the platform backing store was set up successfully.
bool ZPhysicalMemoryManager::is_initialized() const {
  return _backing.is_initialized();
}
 291 
// Delegate to the backing store to log any platform commit-limit warnings
// (e.g. when max_capacity exceeds what the OS can actually commit).
void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
  _backing.warn_commit_limits(max_capacity);
}
 295 
// Decide (during VM init only) whether heap uncommit should be enabled,
// logging the decision and the reason.
bool ZPhysicalMemoryManager::should_enable_uncommit(size_t min_capacity, size_t max_capacity) {
  assert(!is_init_completed(), "Invalid state");

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then uncommit
  // will be enabled.
  if (!ZUncommit) {
    log_info(gc, init)("Uncommit: Disabled");
    return false;
  }

  if (max_capacity == min_capacity) {
    log_info(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
    return false;
  }

  // Test if uncommit is supported by the operating system by committing
  // and then uncommitting a granule.
  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */));
  if (!commit(pmem) || !uncommit(pmem)) {
    log_info(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
    return false;
  }

  log_info(gc, init)("Uncommit: Enabled");
  log_info(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);

  return true;
}
 325 
// Report a commit to Native Memory Tracking at the given heap offset.
void ZPhysicalMemoryManager::nmt_commit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // From an NMT point of view we treat the first heap view (marked0) as committed
  const uintptr_t addr = ZAddress::marked0(offset);
  const size_t size = pmem.size();
  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
 332 
// Report an uncommit to Native Memory Tracking, using the same (marked0)
// view that nmt_commit() reported. Skipped when NMT is (at most) minimal.
void ZPhysicalMemoryManager::nmt_uncommit(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (MemTracker::tracking_level() > NMT_minimal) {
    const uintptr_t addr = ZAddress::marked0(offset);
    const size_t size = pmem.size();
    Tracker tracker(Tracker::uncommit);
    tracker.record((address)addr, size);
  }
}
 341 
 342 void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
 343   assert(is_aligned(size, ZGranuleSize), "Invalid size");
 344 
 345   // Allocate segments
 346   while (size > 0) {
 347     size_t allocated = 0;
 348     const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
 349     assert(start != UINTPTR_MAX, "Allocation should never fail");
 350     pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
 351     size -= allocated;
 352   }
 353 }
 354 
 355 void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
 356   // Free segments
 357   for (uint32_t i = 0; i < pmem.nsegments(); i++) {
 358     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 359     _manager.free(segment.start(), segment.size());
 360   }
 361 }
 362 
// Commit all (not-yet-committed) segments of pmem in the backing store.
// Returns false on the first failed or partial commit; pmem's segment state
// is updated to reflect exactly what was committed (commit_segment() splits
// a partially committed segment).
bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
  // Commit segments
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (segment.is_committed()) {
      // Segment already committed
      continue;
    }

    // Commit segment
    const size_t committed = _backing.commit(segment.start(), segment.size());
    if (!pmem.commit_segment(i, committed)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}
 383 
// Uncommit all (currently committed) segments of pmem in the backing store.
// Returns false on the first failed or partial uncommit; pmem's segment
// state is updated to reflect exactly what remains committed.
bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
  // Uncommit segments
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (!segment.is_committed()) {
      // Segment already uncommitted
      continue;
    }

    // Uncommit segment
    const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
    if (!pmem.uncommit_segment(i, uncommitted)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}
 404 
 405 void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
 406   const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
 407   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 408 }
 409 
// Map pmem's segments contiguously starting at virtual address addr.
// All-or-nothing: on failure, any segments already mapped are unmapped
// again and false is returned.
bool ZPhysicalMemoryManager::map_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  // Running total of bytes mapped so far; also the offset of the next mapping
  size_t size = 0;

  // Map segments
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (!_backing.map(addr + size, segment.size(), segment.start())) {
      // Failed to map segment
      if (size > 0) {
        // Unmap successfully mapped segments
        _backing.unmap(addr, size);
      }

      return false;
    }
    size += segment.size();
  }

  // Setup NUMA interleaving for large pages
  if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
    // To get granule-level NUMA interleaving when using large pages,
    // we simply let the kernel interleave the memory for us at page
    // fault time.
    os::numa_make_global((char*)addr, size);
  }

  // Success
  return true;
}
 439 
// Unmap the full contiguous range previously mapped for pmem at addr.
void ZPhysicalMemoryManager::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
  _backing.unmap(addr, pmem.size());
}
 443 
 444 void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
 445   if (ZVerifyViews) {
 446     // Pre-touch good view
 447     pretouch_view(ZAddress::good(offset), size);
 448   } else {
 449     // Pre-touch all views
 450     pretouch_view(ZAddress::marked0(offset), size);
 451     pretouch_view(ZAddress::marked1(offset), size);
 452     pretouch_view(ZAddress::remapped(offset), size);
 453   }
 454 }
 455 
// Map pmem at heap offset 'offset'. In normal mode all three color views
// are mapped all-or-nothing: a failure rolls back the views mapped so far
// and returns false. With ZVerifyViews only the good view is mapped and a
// failure is fatal. On success the commit is reported to NMT.
bool ZPhysicalMemoryManager::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  if (ZVerifyViews) {
    // Map good view
    if (!map_view(pmem, ZAddress::good(offset))) {
      fatal("Failed to map memory");
    }
  } else {
    // Map all views, undoing earlier views if a later one fails
    if (!map_view(pmem, ZAddress::marked0(offset))) {
      return false;
    }
    if (!map_view(pmem, ZAddress::marked1(offset))) {
      unmap_view(pmem, ZAddress::marked0(offset));
      return false;
    }
    if (!map_view(pmem, ZAddress::remapped(offset))) {
      unmap_view(pmem, ZAddress::marked1(offset));
      unmap_view(pmem, ZAddress::marked0(offset));
      return false;
    }
  }

  nmt_commit(pmem, offset);

  // Success
  return true;
}
 483 
 484 void ZPhysicalMemoryManager::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
 485   nmt_uncommit(pmem, offset);
 486 
 487   if (ZVerifyViews) {
 488     // Unmap good view
 489     unmap_view(pmem, ZAddress::good(offset));
 490   } else {
 491     // Unmap all views
 492     unmap_view(pmem, ZAddress::marked0(offset));
 493     unmap_view(pmem, ZAddress::marked1(offset));
 494     unmap_view(pmem, ZAddress::remapped(offset));
 495   }
 496 }
 497 
// ZVerifyViews-only helper: (re)map the good view for pmem at 'offset'.
// A mapping failure here is fatal since there is nothing to roll back to.
void ZPhysicalMemoryManager::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Map good view
  assert(ZVerifyViews, "Should be enabled");
  if (!map_view(pmem, ZAddress::good(offset))) {
    fatal("Failed to map memory");
  }
}
 505 
// ZVerifyViews-only helper: unmap the good view for pmem at 'offset'.
void ZPhysicalMemoryManager::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
  // Unmap good view
  assert(ZVerifyViews, "Should be enabled");
  unmap_view(pmem, ZAddress::good(offset));
}