/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

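// A page allocation request. Requests are allocated on the stack of the
// allocating thread and, if the allocation cannot be satisfied immediately,
// queued on the page allocator's request queue until the thread is woken
// up with a result (a page, NULL on out-of-memory, or the gc_marker).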
class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;
  const size_t                 _size;
  const ZAllocationFlags       _flags;
  const unsigned int           _total_collections;
  ZListNode<ZPageAllocRequest> _node;
  ZFuture<ZPage*>              _result;

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections) {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPage* wait() {
    return _result.get();
  }

  void satisfy(ZPage* page) {
    _result.set(page);
  }
};

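// Sentinel result used to wake up a blocked allocation request and tell it
// to trigger another GC cycle and continue waiting, rather than fail the
// allocation with an out-of-memory condition.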
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _virtual(),
    _physical(),
    _cache(),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Commit initial capacity
  _capacity = _physical.commit(initial_capacity);
  if (_capacity != initial_capacity) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then we will
  // try to uncommit unused memory.
  _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
  if (_uncommit) {
    log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
  } else {
    log_info(gc, init)("Uncommit: Disabled");
  }

  // Pre-map initial capacity
  prime_cache(initial_capacity);

  // Successfully initialized
  _initialized = true;
}

void ZPageAllocator::prime_cache(size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  guarantee(!pmem.is_null(), "Invalid size");

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
  guarantee(!vmem.is_null(), "Invalid size");

  // Allocate page
  ZPage* const page = new ZPage(vmem, pmem);

  // Map page
  map_page(page);
  page->set_pre_mapped();

  // Add page to cache
  page->set_last_used();
  _cache.free_page(page);
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  return MIN2(SoftMaxHeapSize, _current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return _capacity;
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return _used;
}

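// Returns the amount of committed memory that is currently neither used nor
// set aside as reserve (clamped at zero).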
size_t ZPageAllocator::unused() const {
  const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;
  _used += size;
  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  if (reclaimed) {
    // Only pages explicitly released with the reclaimed flag set
    // count as reclaimed bytes. This flag is typically true when
    // a worker releases a page after relocation, and is typically
    // false when we release a page to undo an allocation.
    _reclaimed += size;
  }
  _used -= size;
  if (_used < _used_low) {
    _used_low = _used;
  }
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    return NULL;
  }

  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  assert(!pmem.is_null(), "Invalid size");

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

void ZPageAllocator::destroy_page(ZPage* page) {
  const ZVirtualMemory& vmem = page->virtual_memory();
  const ZPhysicalMemory& pmem = page->physical_memory();

  // Unmap memory
  _physical.unmap(pmem, vmem.start());

  // Free physical memory
  _physical.free(pmem);

  // Free virtual memory
  _virtual.free(vmem);

  // Delete page safely
  _safe_delete(page);
}

void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  if (!page->is_mapped()) {
    _physical.map(page->physical_memory(), page->start());
  } else if (ZVerifyViews) {
    _physical.debug_map(page->physical_memory(), page->start());
  }
}

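// Returns how much memory is still available for allocation before the
// current max capacity is reached, optionally excluding the reserve.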
size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = _current_max_capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available;
}

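// Ensures that at least 'size' bytes of committed memory are available for
// allocation, increasing the capacity if needed. Returns false if the
// request cannot be satisfied.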
bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
  if (max_available(no_reserve) < size) {
    // Not enough free memory
    return false;
  }

  // We add the max_reserve to the requested size to avoid losing
  // the reserve if we fail to increase capacity before reaching
  // max capacity.
  size += _max_reserve;

  // Don't try to increase capacity if enough unused capacity
  // is available or if current max capacity has been reached.
  const size_t available = _capacity - _used;
  if (available < size && _capacity < _current_max_capacity) {
    // Try to increase capacity
    const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
    const size_t committed = _physical.commit(commit);
    _capacity += committed;

    log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
                        "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
                        "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
                        size / M, no_reserve ? "True" : "False", available / M,
                        commit / M, committed / M, _capacity / M);

    if (committed != commit) {
      // Failed, or partly failed, to increase capacity. Adjust current
      // max capacity to avoid further attempts to increase capacity.
      log_error(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0lf%%) to " SIZE_FORMAT "M(%.0lf%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

      _current_max_capacity = _capacity;
    }
  }

  if (!no_reserve) {
    size -= _max_reserve;
  }

  const size_t new_available = _capacity - _used;
  return new_available >= size;
}

void ZPageAllocator::ensure_uncached_available(size_t size) {
  assert(_capacity - _used >= size, "Invalid size");
  const size_t uncached_available = _capacity - _used - _cache.available();
  if (size > uncached_available) {
    flush_cache_for_allocation(size - uncached_available);
  }
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
  if (!ensure_available(size, no_reserve)) {
    // Not enough free memory
    return NULL;
  }

  // Try to allocate the page from the cache
  ZPage* const page = _cache.alloc_page(type, size);
  if (page != NULL) {
    return page;
  }

  // Try to flush pages from the cache
  ensure_uncached_available(size);

  // Create new page
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, _used, max_available(flags.no_reserve()), _cache.available(), flags);

  return page;
}

void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

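// Allocates a page, blocking the calling thread and triggering asynchronous
// GC cycles until the allocation can be satisfied or we run out of memory.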
ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);

    // We can only block if VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);

    {
      // Guard deletion of underlying semaphore. This is a workaround for a
      // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread has returned from sem_wait(). To avoid this race we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      ZLocker<ZLock> locker(&_lock);
    }
  }

  return page;
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker<ZLock> locker(&_lock);
  return alloc_page_common(type, size, flags);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  map_page(page);

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

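// Tries to satisfy queued allocation requests, in FIFO order, now that more
// memory might have become available.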
void ZPageAllocator::satisfy_alloc_queue() {
  for (;;) {
    ZPageAllocRequest* const request = _queue.first();
    if (request == NULL) {
      // Allocation queue is empty
      return;
    }

    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
    if (page == NULL) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy request. Note that
    // the dequeue operation must happen first, since the request
    // will immediately be deallocated once it has been satisfied.
    _queue.remove(request);
    request->satisfy(page);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  if (reclaimed) {
    // Clear memory
    page->clear();
  }

  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try to satisfy blocked allocations
  satisfy_alloc_queue();
}

size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl) {
  ZList<ZPage> list;

  // Flush pages
  _cache.flush(cl, &list);

  const size_t overflushed = cl->overflushed();
  if (overflushed > 0) {
    // Overflushed, keep part of last page
    ZPage* const page = list.last()->split(overflushed);
    _cache.free_page(page);
  }

  // Destroy pages
  size_t flushed = 0;
  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    flushed += page->size();
    destroy_page(page);
  }

  return flushed;
}

class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
  ZPageCacheFlushForAllocationClosure(size_t requested) :
      ZPageCacheFlushClosure(requested) {}

  virtual bool do_page(const ZPage* page) {
    if (_flushed < _requested) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Don't flush page
    return false;
  }
};

void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
  assert(requested <= _cache.available(), "Invalid request");

  // Flush pages
  ZPageCacheFlushForAllocationClosure cl(requested);
  const size_t flushed = flush_cache(&cl);

  assert(requested == flushed, "Failed to flush");

  const size_t cached_after = _cache.available();
  const size_t cached_before = cached_after + flushed;

  log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                     "Flushed: " SIZE_FORMAT "M",
                     cached_before / M, percent_of(cached_before, max_capacity()),
                     cached_after / M, percent_of(cached_after, max_capacity()),
                     flushed / M);

  // Update statistics
  ZStatInc(ZCounterPageCacheFlush, flushed);
}

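// Flushes cached pages that have been unused for at least 'delay' seconds,
// up to the requested number of bytes. Also keeps track of the shortest
// timeout until one of the retained pages expires.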
class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;
  const uint64_t _delay;
  uint64_t       _timeout;

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
      ZPageCacheFlushClosure(requested),
      _now(os::elapsedTime()),
      _delay(delay),
      _timeout(_delay) {}

  virtual bool do_page(const ZPage* page) {
    const uint64_t expires = page->last_used() + _delay;
    const uint64_t timeout = expires - MIN2(expires, _now);

    if (_flushed < _requested && timeout == 0) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Record shortest non-expired timeout
    _timeout = MIN2(_timeout, timeout);

    // Don't flush page
    return false;
  }

  uint64_t timeout() const {
    return _timeout;
  }
};

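// Uncommits memory that has been unused for at least 'delay' seconds, never
// uncommitting the reserve and never going below min capacity. Returns the
// number of seconds to wait before uncommit should be attempted again.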
uint64_t ZPageAllocator::uncommit(uint64_t delay) {
  // The default timeout, used when uncommit is disabled or no
  // pages are found in the cache, equals the delay.
  uint64_t timeout = delay;

  if (!_uncommit) {
    // Disabled
    return timeout;
  }

  size_t capacity_before;
  size_t capacity_after;
  size_t uncommitted;

  {
    SuspendibleThreadSetJoiner joiner;
    ZLocker<ZLock> locker(&_lock);

    // Don't flush more than we will uncommit. Never uncommit
    // the reserve, and never uncommit below min capacity.
    const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
    const size_t guarded = MAX2(needed, _min_capacity);
    const size_t uncommittable = _capacity - guarded;
    const size_t uncached_available = _capacity - _used - _cache.available();
    size_t uncommit = MIN2(uncommittable, uncached_available);
    const size_t flush = uncommittable - uncommit;

    if (flush > 0) {
      // Flush pages to uncommit
      ZPageCacheFlushForUncommitClosure cl(flush, delay);
      uncommit += flush_cache(&cl);
      timeout = cl.timeout();
    }

    // Uncommit
    uncommitted = _physical.uncommit(uncommit);
    _capacity -= uncommitted;

    capacity_after = _capacity;
    capacity_before = capacity_after + uncommitted;
  }

  if (uncommitted > 0) {
    log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0lf%%)->" SIZE_FORMAT "M(%.0lf%%), "
                       "Uncommitted: " SIZE_FORMAT "M",
                       capacity_before / M, percent_of(capacity_before, max_capacity()),
                       capacity_after / M, percent_of(capacity_after, max_capacity()),
                       uncommitted / M);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
  }

  return timeout;
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

class ZPageCacheDebugMapClosure : public StackObj {
private:
  const ZPageAllocator* const _allocator;

public:
  ZPageCacheDebugMapClosure(const ZPageAllocator* allocator) :
      _allocator(allocator) {}

  virtual void do_page(const ZPage* page) {
    _allocator->debug_map_page(page);
  }
};

void ZPageAllocator::debug_map_cached_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  ZPageCacheDebugMapClosure cl(this);
  _cache.pages_do(&cl);
}

void ZPageAllocator::debug_unmap_all_pages() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax), 0 /* offset */);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      request->satisfy(gc_marker);
      return;
    }

    // Out of memory, fail allocation request
    _queue.remove_first();
    request->satisfy(NULL);
  }
}