1 /*
   2  * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/gcLogPrecious.hpp"
  26 #include "gc/z/zAddress.inline.hpp"
  27 #include "gc/z/zArray.inline.hpp"
  28 #include "gc/z/zGlobals.hpp"
  29 #include "gc/z/zLargePages.inline.hpp"
  30 #include "gc/z/zNUMA.inline.hpp"
  31 #include "gc/z/zPhysicalMemory.inline.hpp"
  32 #include "logging/log.hpp"
  33 #include "runtime/globals.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/init.hpp"
  36 #include "runtime/os.hpp"
  37 #include "services/memTracker.hpp"
  38 #include "utilities/align.hpp"
  39 #include "utilities/debug.hpp"
  40 #include "utilities/globalDefinitions.hpp"
  41 #include "utilities/powerOfTwo.hpp"
  42 
// Creates an empty physical memory with no segments.
ZPhysicalMemory::ZPhysicalMemory() :
    _segments() {}
  45 
// Creates a physical memory containing a single segment.
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) :
    _segments() {
  add_segment(segment);
}
  50 
// Copy constructor. Copies (and merges where adjacent) all segments
// from the other physical memory.
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) :
    _segments() {
  add_segments(pmem);
}
  55 
  56 const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
  57   // Free segments
  58   _segments.clear_and_deallocate();
  59 
  60   // Copy segments
  61   add_segments(pmem);
  62 
  63   return *this;
  64 }
  65 
  66 size_t ZPhysicalMemory::size() const {
  67   size_t size = 0;
  68 
  69   for (int i = 0; i < _segments.length(); i++) {
  70     size += _segments.at(i).size();
  71   }
  72 
  73   return size;
  74 }
  75 
  76 void ZPhysicalMemory::insert_segment(int index, uintptr_t start, size_t size, bool committed) {
  77   _segments.insert_before(index, ZPhysicalMemorySegment(start, size, committed));
  78 }
  79 
  80 void ZPhysicalMemory::replace_segment(int index, uintptr_t start, size_t size, bool committed) {
  81   _segments.at_put(index, ZPhysicalMemorySegment(start, size, committed));
  82 }
  83 
// Removes the segment at the given index, shifting later segments down.
void ZPhysicalMemory::remove_segment(int index) {
  _segments.remove_at(index);
}
  87 
  88 void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
  89   for (int i = 0; i < pmem.nsegments(); i++) {
  90     add_segment(pmem.segment(i));
  91   }
  92 }
  93 
// Removes all segments and releases the backing array storage.
void ZPhysicalMemory::remove_segments() {
  _segments.clear_and_deallocate();
}
  97 
  98 static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
  99   return before.end() == after.start() && before.is_committed() == after.is_committed();
 100 }
 101 
 102 void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
 103   // Insert segments in address order, merge segments when possible
 104   for (int i = _segments.length(); i > 0; i--) {
 105     const int current = i - 1;
 106 
 107     if (_segments.at(current).end() <= segment.start()) {
 108       if (is_mergable(_segments.at(current), segment)) {
 109         if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
 110           // Merge with end of current segment and start of next segment
 111           const size_t start = _segments.at(current).start();
 112           const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size();
 113           replace_segment(current, start, size, segment.is_committed());
 114           remove_segment(current + 1);
 115           return;
 116         }
 117 
 118         // Merge with end of current segment
 119         const size_t start = _segments.at(current).start();
 120         const size_t size = _segments.at(current).size() + segment.size();
 121         replace_segment(current, start, size, segment.is_committed());
 122         return;
 123       } else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
 124         // Merge with start of next segment
 125         const size_t start = segment.start();
 126         const size_t size = segment.size() + _segments.at(current + 1).size();
 127         replace_segment(current + 1, start, size, segment.is_committed());
 128         return;
 129       }
 130 
 131       // Insert after current segment
 132       insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
 133       return;
 134     }
 135   }
 136 
 137   if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) {
 138     // Merge with start of first segment
 139     const size_t start = segment.start();
 140     const size_t size = segment.size() + _segments.at(0).size();
 141     replace_segment(0, start, size, segment.is_committed());
 142     return;
 143   }
 144 
 145   // Insert before first segment
 146   insert_segment(0, segment.start(), segment.size(), segment.is_committed());
 147 }
 148 
 149 bool ZPhysicalMemory::commit_segment(int index, size_t size) {
 150   assert(size <= _segments.at(index).size(), "Invalid size");
 151   assert(!_segments.at(index).is_committed(), "Invalid state");
 152 
 153   if (size == _segments.at(index).size()) {



 154     // Completely committed
 155     _segments.at(index).set_committed(true);
 156     return true;
 157   }
 158 
 159   if (size > 0) {
 160     // Partially committed, split segment
 161     insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, false /* committed */);
 162     replace_segment(index, _segments.at(index).start(), size, true /* committed */);
 163   }
 164 
 165   return false;
 166 }
 167 
 168 bool ZPhysicalMemory::uncommit_segment(int index, size_t size) {
 169   assert(size <= _segments.at(index).size(), "Invalid size");
 170   assert(_segments.at(index).is_committed(), "Invalid state");


 171 
 172   if (size == _segments.at(index).size()) {
 173     // Completely uncommitted
 174     _segments.at(index).set_committed(false);
 175     return true;
 176   }
 177 
 178   if (size > 0) {
 179     // Partially uncommitted, split segment
 180     insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, true /* committed */);
 181     replace_segment(index, _segments.at(index).start(), size, false /* committed */);
 182   }
 183 
 184   return false;
 185 }
 186 
// Splits off and returns the first 'size' bytes of this physical memory.
// Segments are transferred in address order; a segment straddling the
// split point is divided in two. The segments that remain in this object
// are compacted in place using a write index, then the array is truncated.
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
  ZPhysicalMemory pmem;
  int nsegments = 0;  // Write index for segments kept in this object

  for (int i = 0; i < _segments.length(); i++) {
    const ZPhysicalMemorySegment& segment = _segments.at(i);
    if (pmem.size() < size) {
      if (pmem.size() + segment.size() <= size) {
        // Transfer segment
        pmem.add_segment(segment);
      } else {
        // Split segment
        const size_t split_size = size - pmem.size();
        pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
        _segments.at_put(nsegments++, ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed()));
      }
    } else {
      // Keep segment
      _segments.at_put(nsegments++, segment);
    }
  }

  _segments.trunc_to(nsegments);

  return pmem;
}
 213 
 214 ZPhysicalMemory ZPhysicalMemory::split_committed() {
 215   ZPhysicalMemory pmem;
 216   int nsegments = 0;
 217 
 218   for (int i = 0; i < _segments.length(); i++) {
 219     const ZPhysicalMemorySegment& segment = _segments.at(i);
 220     if (segment.is_committed()) {
 221       // Transfer segment
 222       pmem.add_segment(segment);
 223     } else {
 224       // Keep segment
 225       _segments.at_put(nsegments++, segment);
 226     }
 227   }
 228 
 229   _segments.trunc_to(nsegments);
 230 
 231   return pmem;
 232 }
 233 
// Initializes the physical memory backing and marks the entire
// [0, max_capacity) offset range as free for allocation.
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) :
    _backing(max_capacity) {
  // Make the whole range free
  _manager.free(0, max_capacity);
}
 239 
// Returns true if the physical memory backing was successfully initialized.
bool ZPhysicalMemoryManager::is_initialized() const {
  return _backing.is_initialized();
}
 243 
// Delegates to the backing to warn if platform commit limits could
// prevent committing max_capacity bytes.
void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
  _backing.warn_commit_limits(max_capacity);
}
 247 
// Decides whether uncommitting of unused heap memory should be enabled,
// possibly clearing the ZUncommit flag ergonomically. Must run before
// VM initialization completes.
void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
  assert(!is_init_completed(), "Invalid state");

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then uncommit
  // will be enabled.
  if (!ZUncommit) {
    log_info_p(gc, init)("Uncommit: Disabled");
    return;
  }

  if (max_capacity == min_capacity) {
    log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  // Test if uncommit is supported by the operating system by committing
  // and then uncommitting a granule.
  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0, ZGranuleSize, false /* committed */));
  if (!commit(pmem) || !uncommit(pmem)) {
    log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
    FLAG_SET_ERGO(ZUncommit, false);
    return;
  }

  // Uncommit is supported and enabled
  log_info_p(gc, init)("Uncommit: Enabled");
  log_info_p(gc, init)("Uncommit Delay: " UINTX_FORMAT "s", ZUncommitDelay);
}
 277 
// Records a commit with Native Memory Tracking (NMT) for the given
// heap offset range.
void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
  // From an NMT point of view we treat the first heap view (marked0) as committed
  const uintptr_t addr = ZAddress::marked0(offset);
  MemTracker::record_virtual_memory_commit((void*)addr, size, CALLER_PC);
}
 283 
// Records an uncommit with NMT, mirroring nmt_commit. Skipped entirely
// when NMT tracking is at or below the minimal level.
void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
  if (MemTracker::tracking_level() > NMT_minimal) {
    // Use the same (marked0) view that nmt_commit recorded
    const uintptr_t addr = ZAddress::marked0(offset);
    Tracker tracker(Tracker::uncommit);
    tracker.record((address)addr, size);
  }
}
 291 
 292 void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
 293   assert(is_aligned(size, ZGranuleSize), "Invalid size");
 294 
 295   // Allocate segments
 296   while (size > 0) {
 297     size_t allocated = 0;
 298     const uintptr_t start = _manager.alloc_from_front_at_most(size, &allocated);
 299     assert(start != UINTPTR_MAX, "Allocation should never fail");
 300     pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
 301     size -= allocated;
 302   }
 303 }
 304 
 305 void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
 306   // Free segments
 307   for (int i = 0; i < pmem.nsegments(); i++) {
 308     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 309     _manager.free(segment.start(), segment.size());
 310   }
 311 }
 312 
// Commits all uncommitted segments in pmem via the backing. Returns true
// on full success, false on failure or partial commit. Note that the loop
// condition re-reads nsegments() because commit_segment may split a
// partially committed segment (in which case we return false immediately).
bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
  // Commit segments
  for (int i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (segment.is_committed()) {
      // Segment already committed
      continue;
    }

    // Commit segment
    const size_t committed = _backing.commit(segment.start(), segment.size());
    if (!pmem.commit_segment(i, committed)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}
 333 
// Uncommits all committed segments in pmem via the backing. Returns true
// on full success, false on failure or partial uncommit. Mirrors commit()
// above, including the re-read of nsegments() in the loop condition.
bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
  // Uncommit segments
  for (int i = 0; i < pmem.nsegments(); i++) {
    const ZPhysicalMemorySegment& segment = pmem.segment(i);
    if (!segment.is_committed()) {
      // Segment already uncommitted
      continue;
    }

    // Uncommit segment
    const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
    if (!pmem.uncommit_segment(i, uncommitted)) {
      // Failed or partially failed
      return false;
    }
  }

  // Success
  return true;
}
 354 
 355 void ZPhysicalMemoryManager::pretouch_view(uintptr_t addr, size_t size) const {
 356   const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
 357   os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
 358 }
 359 
 360 void ZPhysicalMemoryManager::map_view(uintptr_t addr, const ZPhysicalMemory& pmem) const {
 361   size_t size = 0;
 362 
 363   // Map segments
 364   for (int i = 0; i < pmem.nsegments(); i++) {
 365     const ZPhysicalMemorySegment& segment = pmem.segment(i);
 366     _backing.map(addr + size, segment.size(), segment.start());
 367     size += segment.size();
 368   }
 369 
 370   // Setup NUMA interleaving for large pages
 371   if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
 372     // To get granule-level NUMA interleaving when using large pages,
 373     // we simply let the kernel interleave the memory for us at page
 374     // fault time.
 375     os::numa_make_global((char*)addr, size);
 376   }
 377 }
 378 
// Unmaps the virtual memory range [addr, addr + size) via the backing.
void ZPhysicalMemoryManager::unmap_view(uintptr_t addr, size_t size) const {
  _backing.unmap(addr, size);
}
 382 
// Pre-touches the heap range at the given offset. With ZVerifyViews only
// the good view is mapped, so only that view is touched; otherwise all
// three color views are touched.
void ZPhysicalMemoryManager::pretouch(uintptr_t offset, size_t size) const {
  if (ZVerifyViews) {
    // Pre-touch good view
    pretouch_view(ZAddress::good(offset), size);
  } else {
    // Pre-touch all views
    pretouch_view(ZAddress::marked0(offset), size);
    pretouch_view(ZAddress::marked1(offset), size);
    pretouch_view(ZAddress::remapped(offset), size);
  }
}
 394 
// Maps pmem at the given heap offset (good view only under ZVerifyViews,
// all three color views otherwise), then records the commit with NMT.
void ZPhysicalMemoryManager::map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
  const size_t size = pmem.size();

  if (ZVerifyViews) {
    // Map good view
    map_view(ZAddress::good(offset), pmem);
  } else {
    // Map all views
    map_view(ZAddress::marked0(offset), pmem);
    map_view(ZAddress::marked1(offset), pmem);
    map_view(ZAddress::remapped(offset), pmem);
  }

  nmt_commit(offset, size);
}
 410 
// Unmaps the heap range at the given offset (good view only under
// ZVerifyViews, all three color views otherwise). The NMT uncommit is
// recorded first, before the mappings are torn down.
void ZPhysicalMemoryManager::unmap(uintptr_t offset, size_t size) const {
  nmt_uncommit(offset, size);

  if (ZVerifyViews) {
    // Unmap good view
    unmap_view(ZAddress::good(offset), size);
  } else {
    // Unmap all views
    unmap_view(ZAddress::marked0(offset), size);
    unmap_view(ZAddress::marked1(offset), size);
    unmap_view(ZAddress::remapped(offset), size);
  }
}
 424 
// Maps pmem's good view only; used by ZVerifyViews debugging, where
// views are mapped/unmapped around each access.
void ZPhysicalMemoryManager::debug_map(uintptr_t offset, const ZPhysicalMemory& pmem) const {
  // Map good view
  assert(ZVerifyViews, "Should be enabled");
  map_view(ZAddress::good(offset), pmem);
}
 430 
// Unmaps the good view only; counterpart to debug_map, used by
// ZVerifyViews debugging.
void ZPhysicalMemoryManager::debug_unmap(uintptr_t offset, size_t size) const {
  // Unmap good view
  assert(ZVerifyViews, "Should be enabled");
  unmap_view(ZAddress::good(offset), size);
}
--- EOF ---