1 /*
   2  * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/gcLogPrecious.hpp"
  26 #include "gc/z/zAddress.inline.hpp"
  27 #include "gc/z/zGlobals.hpp"
  28 #include "gc/z/zLargePages.inline.hpp"
  29 #include "gc/z/zNUMA.inline.hpp"
  30 #include "gc/z/zPhysicalMemory.inline.hpp"
  31 #include "logging/log.hpp"
  32 #include "runtime/globals.hpp"
  33 #include "runtime/globals_extension.hpp"
  34 #include "runtime/init.hpp"
  35 #include "runtime/os.hpp"
  36 #include "services/memTracker.hpp"
  37 #include "utilities/align.hpp"
  38 #include "utilities/debug.hpp"
  39 #include "utilities/globalDefinitions.hpp"
  40 #include "utilities/powerOfTwo.hpp"
  41 
  42 ZPhysicalMemory::ZPhysicalMemory() :
  43     _nsegments_max(0),
  44     _nsegments(0),
  45     _segments(NULL) {}
  46 
  47 ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
  48     _nsegments_max(0),
  49     _nsegments(0),
  50     _segments(NULL) {
  51   add_segment(segment);
  52 }
  53 
  54 ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
  55     _nsegments_max(0),
  56     _nsegments(0),
  57     _segments(NULL) {
  58   add_segments(pmem);
  59 }
  60 
  61 const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
  62   remove_segments();
  63   add_segments(pmem);
  64   return *this;
  65 }
  66 
// Destructor; frees the segment array.
ZPhysicalMemory::~ZPhysicalMemory() {
  remove_segments();
}
  70 
  71 size_t ZPhysicalMemory::size() const {
  72   size_t size = 0;
  73 
  74   for (uint32_t i = 0; i < _nsegments; i++) {
  75     size += _segments[i].size();
  76   }
  77 
  78   return size;
  79 }
  80 
// Insert a new segment at the given index, shifting segments at and
// after the index one slot up. Grows the segment array to the next
// power of two when full.
void ZPhysicalMemory::insert_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
  assert(index <= _nsegments, "Invalid index");

  // Remember the current array; it may be replaced below
  ZPhysicalMemorySegment* const from_segments = _segments;

  if (_nsegments + 1 > _nsegments_max) {
    // Resize array
    _nsegments_max = round_up_power_of_2(_nsegments_max + 1);
    _segments = new ZPhysicalMemorySegment[_nsegments_max];

    // Copy segments before index
    for (uint32_t i = 0; i < index; i++) {
      _segments[i] = from_segments[i];
    }
  }

  // Copy/Move segments after index
  // (copies into the new array, or shifts in place when not resized;
  // the backwards iteration makes the in-place shift safe)
  for (uint32_t i = _nsegments; i > index; i--) {
    _segments[i] = from_segments[i - 1];
  }

  // Insert new segment
  _segments[index] = ZPhysicalMemorySegment(start, size, committed);
  _nsegments++;

  // Delete old array
  if (from_segments != _segments) {
    delete [] from_segments;
  }
}
 111 
 112 void ZPhysicalMemory::replace_segment(uint32_t index, uintptr_t start, size_t size, bool committed) {
 113   assert(index < _nsegments, "Invalid index");
 114   _segments[index] = ZPhysicalMemorySegment(start, size, committed);;
 115 }
 116 
 117 void ZPhysicalMemory::remove_segment(uint32_t index) {
 118   assert(index < _nsegments, "Invalid index");
 119 
 120   // Move segments after index
 121   for (uint32_t i = index + 1; i < _nsegments; i++) {
 122     _segments[i - 1] = _segments[i];
 123   }
 124 
 125   _nsegments--;
 126 }
 127 
 128 void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
 129   for (uint32_t i = 0; i < pmem.nsegments(); i++) {
 130     add_segment(pmem.segment(i));
 131   }
 132 }
 133 
 134 void ZPhysicalMemory::remove_segments() {
 135   delete [] _segments;
 136   _segments = NULL;
 137   _nsegments_max = 0;
 138   _nsegments = 0;
 139 }
 140 
 141 static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
 142   return before.end() == after.start() && before.is_committed() == after.is_committed();
 143 }
 144 
 145 void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
 146   // Insert segments in address order, merge segments when possible
 147   for (uint32_t i = _nsegments; i > 0; i--) {
 148     const uint32_t current = i - 1;
 149 
 150     if (_segments[current].end() <= segment.start()) {
 151       if (is_mergable(_segments[current], segment)) {
 152         if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
 153           // Merge with end of current segment and start of next segment
 154           const size_t start = _segments[current].start();
 155           const size_t size = _segments[current].size() + segment.size() + _segments[current + 1].size();
 156           replace_segment(current, start, size, segment.is_committed());
 157           remove_segment(current + 1);
 158           return;
 159         }
 160 
 161         // Merge with end of current segment
 162         const size_t start = _segments[current].start();
 163         const size_t size = _segments[current].size() + segment.size();
 164         replace_segment(current, start, size, segment.is_committed());
 165         return;
 166       } else if (current + 1 < _nsegments && is_mergable(segment, _segments[current + 1])) {
 167         // Merge with start of next segment
 168         const size_t start = segment.start();
 169         const size_t size = segment.size() + _segments[current + 1].size();
 170         replace_segment(current + 1, start, size, segment.is_committed());
 171         return;
 172       }
 173 
 174       // Insert after current segment
 175       insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
 176       return;
 177     }
 178   }
 179 
 180   if (_nsegments > 0 && is_mergable(segment, _segments[0])) {
 181     // Merge with start of first segment
 182     const size_t start = segment.start();
 183     const size_t size = segment.size() + _segments[0].size();
 184     replace_segment(0, start, size, segment.is_committed());
 185     return;
 186   }
 187 
 188   // Insert before first segment
 189   insert_segment(0, segment.start(), segment.size(), segment.is_committed());
 190 }
 191 
// Record that 'size' bytes at the front of the segment at 'index' were
// committed by the backing. Returns true if the whole segment is now
// committed. On a partial commit (0 < size < segment size) the segment
// is split into a committed front part and an uncommitted tail, and
// false is returned.
bool ZPhysicalMemory::commit_segment(uint32_t index, size_t size) {
  assert(index < _nsegments, "Invalid index");
  assert(size <= _segments[index].size(), "Invalid size");
  assert(!_segments[index].is_committed(), "Invalid state");

  if (size == _segments[index].size()) {
    // Completely committed
    _segments[index].set_committed(true);
    return true;
  }

  if (size > 0) {
    // Partially committed, split segment
    // (insert the uncommitted tail first; its arguments are evaluated
    // before _segments[index] is overwritten by replace_segment)
    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, false /* committed */);
    replace_segment(index, _segments[index].start(), size, true /* committed */);
  }

  return false;
}
 211 
// Record that 'size' bytes at the front of the segment at 'index' were
// uncommitted by the backing. Returns true if the whole segment is now
// uncommitted. On a partial uncommit (0 < size < segment size) the
// segment is split into an uncommitted front part and a still-committed
// tail, and false is returned.
bool ZPhysicalMemory::uncommit_segment(uint32_t index, size_t size) {
  assert(index < _nsegments, "Invalid index");
  assert(size <= _segments[index].size(), "Invalid size");
  assert(_segments[index].is_committed(), "Invalid state");

  if (size == _segments[index].size()) {
    // Completely uncommitted
    _segments[index].set_committed(false);
    return true;
  }

  if (size > 0) {
    // Partially uncommitted, split segment
    // (insert the committed tail first; its arguments are evaluated
    // before _segments[index] is overwritten by replace_segment)
    insert_segment(index + 1, _segments[index].start() + size, _segments[index].size() - size, true /* committed */);
    replace_segment(index, _segments[index].start(), size, false /* committed */);
  }

  return false;
}
 231 
// Split off the first 'size' bytes of this memory into a new descriptor,
// which is returned. Whole segments are transferred while they fit; a
// segment straddling the split point is divided. The segments remaining
// in this descriptor are compacted in place.
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  uint32_t nsegments = 0;  // number of segments kept in this descriptor

  for (uint32_t i = 0; i < _nsegments; i++) {
    const ZPhysicalMemorySegment& segment = _segments[i];
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
        // Transfer segment
        pmem.add_segment(segment);
      } else {
        // Split segment
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
        _segments[nsegments++] = ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed());
      }
    } else {
      // Keep segment
      _segments[nsegments++] = segment;
    }
  }

  _nsegments = nsegments;

  return pmem;
}
 258 
 259 ZPhysicalMemory ZPhysicalMemory::split_committed() {
 260   ZPhysicalMemory pmem;
 261   uint32_t nsegments = 0;
 262 
 263   for (uint32_t i = 0; i < _nsegments; i++) {
 264     const ZPhysicalMemorySegment& segment = _segments[i];
 265     if (segment.is_committed()) {
 266       // Transfer segment
 267       pmem.add_segment(segment);
 268     } else {
 269       // Keep segment
 270       _segments[nsegments++] = segment;
 271     }
 272   }
 273 
 274   _nsegments = nsegments;
 275 
 276   return pmem;
 277 }
 278 
// Construct the manager with backing storage for max_capacity bytes and
// register the entire physical offset range [0, max_capacity) as free.
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity) {
  // Make the whole range free
  _manager.free(0, max_capacity);
}
 284 
// Returns whether the backing storage was successfully initialized.
bool ZPhysicalMemoryManager::is_initialized() const {
  return _backing.is_initialized();
}
 288 
// Delegate to the backing to log warnings if platform commit limits
// could prevent committing max_capacity bytes.
void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
  _backing.warn_commit_limits(max_capacity);
}
 292 
// Decide whether uncommitting of heap memory should be enabled, and
// update the ZUncommit flag ergonomically when it must be disabled.
// Must run before VM initialization completes.
void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
  assert(!is_init_completed(), "Invalid state");

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then uncommit
  // will be enabled.
  if (!ZUncommit) {
    log_info_p(gc, init)("Uncommit: Disabled");
    return;
  }

  if (max_capacity == min_capacity) {
    // Nothing could ever be uncommitted below -Xms, so uncommit is pointless
    log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  // Test if uncommit is supported by the operating system by committing
  // and then uncommitting a granule.
  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */));
  if (!commit(pmem) || !uncommit(pmem)) {
    log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  log_info_p(gc, init)("Uncommit: Enabled");
  log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
}
 322 
// Register a commit with Native Memory Tracking (NMT).
void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
  // From an NMT point of view we treat the first heap view (marked0) as committed
  const uintptr_t addr = ZAddress::marked0(offset);
  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
 328 
// Register an uncommit with Native Memory Tracking (NMT). Skipped when
// NMT is not tracking beyond the minimal level.
void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
  if (MemTracker::tracking_level() > NMT_minimal) {
    // Mirror nmt_commit(): the marked0 view represents the committed memory
    const uintptr_t addr = ZAddress::marked0(offset);
    Tracker tracker(Tracker::uncommit);
    tracker.record((address)addr, size);
  }
}
 336 
 337 void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
 338   assert(is_aligned(size, ZGranuleSize), "Invalid size");
 339 
 340   // Allocate segments
 341   while (size > 0) {
 342     size_t allocated = 0;
 343     const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
 344     assert(start != UINTPTR_MAX, "Allocation should never fail");
 345     pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
 346     size -= allocated;
 347   }
 348 }
 349 
 350 void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
 351   // Free segments
 352   for (uint32_t i = 0; i < pmem.nsegments(); i++) {
 353     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 354     _manager.free(segment.start(), segment.size());
 355   }
 356 }
 357 
 358 bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
 359   // Commit segments
 360   for (uint32_t i = 0; i < pmem.nsegments(); i++) {
 361     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 362     if (segment.is_committed()) {
 363       // Segment already committed
 364       continue;
 365     }
 366 
 367     // Commit segment
 368     const size_t committed = _backing.commit(segment.start(), segment.size());
 369     if (!pmem.commit_segment(i, committed)) {
 370       // Failed or partially failed
 371       return false;
 372     }
 373   }
 374 
 375   // Success
 376   return true;
 377 }
 378 
// Uncommit every committed segment in pmem via the backing. Returns
// false as soon as a segment fails (fully or partially) to uncommit;
// returns true when all segments are uncommitted.
bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
  // Uncommit segments
  for (uint32_t i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (!segment.is_committed()) {
      // Segment already uncommitted
      continue;
    }

    // Uncommit segment
    const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
    if (!pmem.uncommit_segment(i, uncommitted)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}
 399 
 400 void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
 401   const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
 402   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 403 }
 404 
 405 void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const {
 406   size_t size = 0;
 407 
 408   // Map segments
 409   for (uint32_t i = 0; i < pmem.nsegments(); i++) {
 410     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 411     _backing.map(addr + size, segment.size(), segment.start());
 412     size += segment.size();
 413   }
 414 
 415   // Setup NUMA interleaving for large pages
 416   if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
 417     // To get granule-level NUMA interleaving when using large pages,
 418     // we simply let the kernel interleave the memory for us at page
 419     // fault time.
 420     os::numa_make_global((char*)addr, size);
 421   }
 422 }
 423 
// Unmap the given address range of one heap view via the backing.
void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
  _backing.unmap(addr, size);
}
 427 
// Pre-touch the memory at the given heap offset. With ZVerifyViews only
// the currently good view is mapped, so only it is touched; otherwise
// all three heap views (marked0, marked1, remapped) are touched.
void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
  if (ZVerifyViews) {
    // Pre-touch good view
    pretouch_view(ZAddress::good(offset), size);
  } else {
    // Pre-touch all views
    pretouch_view(ZAddress::marked0(offset), size);
    pretouch_view(ZAddress::marked1(offset), size);
    pretouch_view(ZAddress::remapped(offset), size);
  }
}
 439 
// Map the physical memory into the heap view(s) at the given offset and
// then register the range as committed with NMT. With ZVerifyViews only
// the good view is mapped; otherwise all three heap views are.
void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
  const size_t size = pmem.size();

  if (ZVerifyViews) {
    // Map good view
    map_view(ZAddress::good(offset), pmem);
  } else {
    // Map all views
    map_view(ZAddress::marked0(offset), pmem);
    map_view(ZAddress::marked1(offset), pmem);
    map_view(ZAddress::remapped(offset), pmem);
  }

  nmt_commit(offset, size);
}
 455 
// Unregister the range with NMT and unmap it from the heap view(s).
// With ZVerifyViews only the good view is unmapped; otherwise all three
// heap views are.
void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
  nmt_uncommit(offset, size);

  if (ZVerifyViews) {
    // Unmap good view
    unmap_view(ZAddress::good(offset), size);
  } else {
    // Unmap all views
    unmap_view(ZAddress::marked0(offset), size);
    unmap_view(ZAddress::marked1(offset), size);
    unmap_view(ZAddress::remapped(offset), size);
  }
}
 469 
// Map the good view only; used by the ZVerifyViews debugging mode, where
// views are remapped around each use.
void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
  // Map good view
  assert(ZVerifyViews, "Should be enabled");
  map_view(ZAddress::good(offset), pmem);
}
 475 
// Unmap the good view only; counterpart of debug_map() for the
// ZVerifyViews debugging mode.
void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
  // Unmap good view
  assert(ZVerifyViews, "Should be enabled");
  unmap_view(ZAddress::good(offset), size);
}