/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;
  const size_t                 _size;
  const ZAllocationFlags       _flags;
  const unsigned int           _total_collections;
  ZListNode<ZPageAllocRequest> _node;
  ZFuture<ZPage*>              _result;

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections),
      _node(),
      _result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPage* peek() {
    return _result.peek();
  }

  ZPage* wait() {
    return _result.get();
  }

  void satisfy(ZPage* page) {
    _result.set(page);
  }
};

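// Sentinel "page" pointer, distinct from any real ZPage and from NULL. It is
// used to satisfy a stalled allocation request when a new GC cycle has been
// started on its behalf, telling the waiting thread to keep waiting rather
// than treating the result as a successful allocation or a failure (see
// alloc_page_blocking() and check_out_of_memory()).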
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _virtual(max_capacity),
    _physical(max_capacity),
    _cache(),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _satisfied(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Commit initial capacity
  _capacity = _physical.commit(initial_capacity);
  if (_capacity != initial_capacity) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then we will
  // try to uncommit unused memory.
  _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
  if (_uncommit) {
    log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
  } else {
    log_info(gc, init)("Uncommit: Disabled");
  }

  // Pre-map initial capacity
  prime_cache(workers, initial_capacity);

  // Successfully initialized
  _initialized = true;
}

class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
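    // Worker threads race to claim one granule at a time by atomically
    // advancing the shared _start cursor, then pre-touch the granule they
    // claimed, so the range [start, end) is touched exactly once in parallel.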
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

void ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  guarantee(!pmem.is_null(), "Invalid size");

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
  guarantee(!vmem.is_null(), "Invalid size");

  // Allocate page
  ZPage* const page = new ZPage(vmem, pmem);

  // Map page
  map_page(page);
  page->set_pre_mapped();

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_parallel(&task);
  }

  // Add page to cache
  page->set_last_used();
  _cache.free_page(page);
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  return MIN2(SoftMaxHeapSize, _current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return _capacity;
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return _used;
}

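// Memory available for allocation without touching the reserve. The signed
// intermediate is needed because _used can exceed _capacity - _max_reserve
// when allocations dip into the reserve; the result is clamped to zero then.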
size_t ZPageAllocator::unused() const {
  const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;
  _used += size;
  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  // Only pages explicitly released with the reclaimed flag set
  // count as reclaimed bytes. This flag is true when we release
  // a page after relocation, and is false when we release a page
  // to undo an allocation.
  if (reclaimed) {
    _reclaimed += size;
  } else {
    _allocated -= size;
  }
  _used -= size;
  if (_used < _used_low) {
    _used_low = _used;
  }
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    return NULL;
  }

  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  assert(!pmem.is_null(), "Invalid size");

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

void ZPageAllocator::destroy_page(ZPage* page) {
  const ZVirtualMemory& vmem = page->virtual_memory();
  const ZPhysicalMemory& pmem = page->physical_memory();

  // Unmap memory
  _physical.unmap(pmem, vmem.start());

  // Free physical memory
  _physical.free(pmem);

  // Free virtual memory
  _virtual.free(vmem);

  // Delete page safely
  _safe_delete(page);
}

void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  _physical.map(page->physical_memory(), page->start());
}

size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = _current_max_capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available;
}

bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
  if (max_available(no_reserve) < size) {
    // Not enough free memory
    return false;
  }

  // We add the max_reserve to the requested size to avoid losing
  // the reserve because of failure to increase capacity before
  // reaching max capacity.
  size += _max_reserve;

  // Don't try to increase capacity if enough unused capacity
  // is available or if current max capacity has been reached.
  const size_t available = _capacity - _used;
  if (available < size && _capacity < _current_max_capacity) {
    // Try to increase capacity
    const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
    const size_t committed = _physical.commit(commit);
    _capacity += committed;

    log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
                        "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
                        "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
                        size / M, no_reserve ? "True" : "False", available / M,
                        commit / M, committed / M, _capacity / M);

    if (committed != commit) {
      // Failed, or partly failed, to increase capacity. Adjust current
      // max capacity to avoid further attempts to increase capacity.
      log_error(gc)("Forced to lower max Java heap size from "
                    SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                    _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                    _capacity / M, percent_of(_capacity, _max_capacity));

      _current_max_capacity = _capacity;
    }
  }

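  // The reserve was only added above to drive the capacity increase. If the
  // caller is allowed to dip into the reserve, remove it again so the final
  // check only requires the originally requested size to be available.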
  if (!no_reserve) {
    size -= _max_reserve;
  }

  const size_t new_available = _capacity - _used;
  return new_available >= size;
}

void ZPageAllocator::ensure_uncached_available(size_t size) {
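  // Committed memory that is neither in use nor sitting in the page cache is
  // directly available to create_page(). If there is not enough of it, flush
  // just enough cached pages to cover the difference.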
  assert(_capacity - _used >= size, "Invalid size");
  const size_t uncached_available = _capacity - _used - _cache.available();
  if (size > uncached_available) {
    flush_cache_for_allocation(size - uncached_available);
  }
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
  if (!ensure_available(size, no_reserve)) {
    // Not enough free memory
    return NULL;
  }

  // Try to allocate the page from the cache
  ZPage* const page = _cache.alloc_page(type, size);
  if (page != NULL) {
    return page;
  }

  // Try to flush pages from the cache
  ensure_uncached_available(size);

  // Create new page
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  EventZPageAllocation event;

  ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  event.commit(type, size, _used, max_available(flags.no_reserve()),
               _cache.available(), flags.non_blocking(), flags.no_reserve());

  return page;
}

void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);
    EventZAllocationStall event;

    // We can only block if VM is fully initialized
    check_out_of_memory_during_initialization();

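    // Keep retrying as long as the request is satisfied with the gc_marker
    // sentinel, which means a new GC cycle was started on our behalf (see
    // check_out_of_memory()) rather than a page being allocated.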
    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);

    {
      //
      // We grab the lock here for two different reasons:
      //
      // 1) Guard deletion of underlying semaphore. This is a workaround for
      // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread has returned from sem_wait(). To avoid this race we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      //
      // 2) Guard the list of satisfied pages.
      //
      ZLocker<ZLock> locker(&_lock);
      _satisfied.remove(&request);
    }

    event.commit(type, size);
  }

  return page;
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker<ZLock> locker(&_lock);
  return alloc_page_common(type, size, flags);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  if (!page->is_mapped()) {
    map_page(page);
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

void ZPageAllocator::satisfy_alloc_queue() {
  for (;;) {
    ZPageAllocRequest* const request = _queue.first();
    if (request == NULL) {
      // Allocation queue is empty
      return;
    }

    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
    if (page == NULL) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy request. Note that
    // the dequeue operation must happen first, since the request
    // will immediately be deallocated once it has been satisfied.
    _queue.remove(request);
    _satisfied.insert_first(request);
    request->satisfy(page);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try to satisfy blocked allocations
  satisfy_alloc_queue();
}

size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation) {
  EventZPageCacheFlush event;

  ZList<ZPage> list;

  // Flush pages
  _cache.flush(cl, &list);

  const size_t overflushed = cl->overflushed();
  if (overflushed > 0) {
    // Overflushed, keep part of last page
    ZPage* const page = list.last()->split(overflushed);
    _cache.free_page(page);
  }

  // Destroy pages
  size_t flushed = 0;
  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    flushed += page->size();
    destroy_page(page);
  }

  // Send event
  event.commit(flushed, for_allocation);

  return flushed;
}

class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
  ZPageCacheFlushForAllocationClosure(size_t requested) :
      ZPageCacheFlushClosure(requested) {}

  virtual bool do_page(const ZPage* page) {
    if (_flushed < _requested) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Don't flush page
    return false;
  }
};

void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
  assert(requested <= _cache.available(), "Invalid request");

  // Flush pages
  ZPageCacheFlushForAllocationClosure cl(requested);
  const size_t flushed = flush_cache(&cl, true /* for_allocation */);

  assert(requested == flushed, "Failed to flush");

  const size_t cached_after = _cache.available();
  const size_t cached_before = cached_after + flushed;

  log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
                     "Flushed: " SIZE_FORMAT "M",
                     cached_before / M, percent_of(cached_before, max_capacity()),
                     cached_after / M, percent_of(cached_after, max_capacity()),
                     flushed / M);

  // Update statistics
  ZStatInc(ZCounterPageCacheFlush, flushed);
}

class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;
  const uint64_t _delay;
  uint64_t       _timeout;

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
      ZPageCacheFlushClosure(requested),
      _now(os::elapsedTime()),
      _delay(delay),
      _timeout(_delay) {}

  virtual bool do_page(const ZPage* page) {
    const uint64_t expires = page->last_used() + _delay;
    const uint64_t timeout = expires - MIN2(expires, _now);

    if (_flushed < _requested && timeout == 0) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Record shortest non-expired timeout
    _timeout = MIN2(_timeout, timeout);

    // Don't flush page
    return false;
  }

  uint64_t timeout() const {
    return _timeout;
  }
};

uint64_t ZPageAllocator::uncommit(uint64_t delay) {
  // Set the default timeout, used when no pages are found in the
  // cache or when uncommit is disabled, to be equal to the delay.
  uint64_t timeout = delay;

  if (!_uncommit) {
    // Disabled
    return timeout;
  }

  EventZUncommit event;
  size_t capacity_before;
  size_t capacity_after;
  size_t uncommitted;

  {
    SuspendibleThreadSetJoiner joiner;
    ZLocker<ZLock> locker(&_lock);

    // Don't flush more than we will uncommit. Never uncommit
    // the reserve, and never uncommit below min capacity.
    const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
    const size_t guarded = MAX2(needed, _min_capacity);
    const size_t uncommittable = _capacity - guarded;
    const size_t uncached_available = _capacity - _used - _cache.available();
    size_t uncommit = MIN2(uncommittable, uncached_available);
    const size_t flush = uncommittable - uncommit;
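    // "uncommit" is committed memory that is neither in use nor cached and
    // can be uncommitted right away; "flush" is the remainder of the
    // uncommittable memory, which is held by the page cache and must first
    // be flushed (once its delay has expired) before it can be uncommitted.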

    if (flush > 0) {
      // Flush pages to uncommit
      ZPageCacheFlushForUncommitClosure cl(flush, delay);
      uncommit += flush_cache(&cl, false /* for_allocation */);
      timeout = cl.timeout();
    }

    // Uncommit
    uncommitted = _physical.uncommit(uncommit);
    _capacity -= uncommitted;

    capacity_after = _capacity;
    capacity_before = capacity_after + uncommitted;
  }

  if (uncommitted > 0) {
    log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
                       "Uncommitted: " SIZE_FORMAT "M",
                       capacity_before / M, percent_of(capacity_before, max_capacity()),
                       capacity_after / M, percent_of(capacity_after, max_capacity()),
                       uncommitted / M);

    // Send event
    event.commit(capacity_before, capacity_after, uncommitted);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
  }

  return timeout;
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::pages_do(ZPageClosure* cl) const {
  ZListIterator<ZPageAllocRequest> iter(&_satisfied);
  for (ZPageAllocRequest* request; iter.next(&request);) {
    const ZPage* const page = request->peek();
    if (page != NULL) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
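      // (satisfying with gc_marker wakes the stalled thread in
      // alloc_page_blocking(), which triggers the GC and then waits again)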
      request->satisfy(gc_marker);
      return;
    }

    // Out of memory, fail allocation request
    _queue.remove(request);
    _satisfied.insert_first(request);
    request->satisfy(NULL);
  }
}