/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

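// Outcome of a stalled page allocation: the request was either satisfied,
// failed with an out-of-memory condition, or should be retried after a new
// GC cycle has been started.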
enum ZPageAllocationStall {
  ZPageAllocationStallSuccess,
  ZPageAllocationStallFailed,
  ZPageAllocationStallStartGC
};

class ZPageAllocation : public StackObj {
  friend class ZList<ZPageAllocation>;

private:
  const uint8_t                 _type;
  const size_t                  _size;
  const ZAllocationFlags        _flags;
  const unsigned int            _total_collections;
  ZList<ZPage>                  _pages;
  ZListNode<ZPageAllocation>    _node;
  ZFuture<ZPageAllocationStall> _stall_result;

public:
  ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(is_init_completed() ? ZCollectedHeap::heap()->total_collections() : 0),
      _pages(),
      _node(),
      _stall_result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

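  // The heap's total_collections() value captured when this allocation
  // started (zero during VM initialization). check_out_of_memory() uses it
  // to fail requests that were enqueued before the last GC cycle started.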
  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPageAllocationStall wait() {
    return _stall_result.get();
  }

  ZList<ZPage>* pages() {
    return &_pages;
  }

  void satisfy(ZPageAllocationStall result) {
    _stall_result.set(result);
  }
};

ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _cache(),
    _virtual(max_capacity),
    _physical(max_capacity),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used(0),
    _used_high(0),
    _used_low(0),
    _allocated(0),
    _reclaimed(0),
    _stalled(),
    _satisfied(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Check if uncommit should be enabled
  _uncommit = _physical.should_enable_uncommit(min_capacity, max_capacity);

  // Pre-map initial capacity
  if (!prime_cache(workers, initial_capacity)) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // Successfully initialized
  _initialized = true;
}

class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
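  // Allocate a single large page covering the initial capacity, optionally
  // pre-touch it, and then immediately free it so that the committed and
  // mapped memory ends up in the page cache, ready for future allocations.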
  ZAllocationFlags flags;

  flags.set_non_blocking();
  flags.set_low_address();

  ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
  if (page == NULL) {
    return false;
  }

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_parallel(&task);
  }

  free_page(page, false /* reclaimed */);

  return true;
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
  const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
  return MIN2(soft_max_capacity, current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return Atomic::load(&_capacity);
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return Atomic::load(&_used);
}

size_t ZPageAllocator::unused() const {
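  // Use signed arithmetic, since the unused part of the capacity can be
  // smaller than the reserve, making the difference below negative.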
  const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
  const ssize_t used = (ssize_t)Atomic::load(&_used);
  const ssize_t max_reserve = (ssize_t)_max_reserve;
  const ssize_t unused = capacity - used - max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
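  // _reclaimed is signed and can be negative, since allocating pages for
  // relocation subtracts from it. Report zero in that case.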
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

size_t ZPageAllocator::increase_capacity(size_t size) {
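  // Increase capacity by at most the requested size, without exceeding the
  // current max capacity. The caller handles the case where the returned
  // increase is smaller than the requested size.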
  const size_t increase = MIN2(size, _current_max_capacity - _capacity);

  // Update atomically since we have concurrent readers
  Atomic::add(&_capacity, increase);

  return increase;
}

void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
  // Update atomically since we have concurrent readers
  Atomic::sub(&_capacity, size);

  if (set_max_capacity) {
    // Adjust current max capacity to avoid further attempts to increase capacity
    log_error(gc)("Forced to lower max Java heap size from "
                  SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                  _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                  _capacity / M, percent_of(_capacity, _max_capacity));

    // Update atomically since we have concurrent readers
    Atomic::store(&_current_max_capacity, _capacity);
  }
}

void ZPageAllocator::increase_used(size_t size, bool allocation, bool relocation) {
  if (allocation) {
    if (relocation) {
      // Allocating a page for the purpose of relocation has a
      // negative contribution to the number of reclaimed bytes.
      _reclaimed -= size;
    }
    _allocated += size;
  }

  // Update atomically since we have concurrent readers
  Atomic::add(&_used, size);

  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool free, bool reclaimed) {
  if (free) {
    // Only pages explicitly released with the reclaimed flag set
    // count as reclaimed bytes. This flag is true when we release
    // a page after relocation, and is false when we release a page
    // to undo an allocation.
    if (reclaimed) {
      _reclaimed += size;
    } else {
      _allocated -= size;
    }
  }

  // Update atomically since we have concurrent readers
  Atomic::sub(&_used, size);

  if (_used < _used_low) {
    _used_low = _used;
  }
}

bool ZPageAllocator::commit_page(ZPage* page) {
  // Commit physical memory
  return _physical.commit(page->physical_memory());
}

void ZPageAllocator::uncommit_page(ZPage* page) {
  // Uncommit physical memory, if uncommit is supported/enabled
  if (_uncommit) {
    _physical.uncommit(page->physical_memory());
  }
}

bool ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  return _physical.map(page->physical_memory(), page->start());
}

void ZPageAllocator::unmap_page(const ZPage* page) const {
  // Unmap physical memory
  _physical.unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::destroy_page(ZPage* page) {
  // Free virtual memory
  _virtual.free(page->virtual_memory());

  // Free physical memory
  _physical.free(page->physical_memory());

  // Delete page safely
  _safe_delete(page);
}

bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
  size_t available = _current_max_capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available >= size;
}

bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
  size_t available = _capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  } else if (_capacity != _current_max_capacity) {
    // Always increase capacity before using the reserve
    return false;
  }

  return available >= size;
}

bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
  if (!is_alloc_allowed(size, no_reserve)) {
    // Out of memory
    return false;
  }

  // Try allocate from the page cache
  if (is_alloc_allowed_from_cache(size, no_reserve)) {
    ZPage* const page = _cache.alloc_page(type, size);
    if (page != NULL) {
      // Success
      pages->insert_last(page);
      return true;
    }
  }

  // Try increase capacity
  const size_t increased = increase_capacity(size);
  if (increased < size) {
    // Could not increase capacity enough to satisfy the allocation
    // completely. Flush the page cache to satisfy the remainder.
    const size_t remaining = size - increased;
    _cache.flush_for_allocation(remaining, pages);
  }

  // Success
  return true;
}

bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
  EventZPageAllocation event;
  const uint8_t type = allocation->type();
  const size_t size = allocation->size();
  const ZAllocationFlags flags = allocation->flags();
  ZList<ZPage>* const pages = allocation->pages();

  // Try allocate without using the reserve
  if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
    // If allowed to, try allocate using the reserve
    if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
      // Out of memory
      return false;
    }
  }

  // Update used statistics
  increase_used(size, true /* allocation */, flags.relocation());

  // Send event
  event.commit(type, size, flags.non_blocking(), flags.no_reserve(),
               _used, _current_max_capacity - _used, _capacity - _used);

  // Success
  return true;
}

static void check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
  ZStatTimer timer(ZCriticalPhaseAllocationStall);
  EventZAllocationStall event;
  ZPageAllocationStall result;

  // We can only block if the VM is fully initialized
  check_out_of_memory_during_initialization();

  do {
    // Start asynchronous GC
    ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

    // Wait for allocation to complete, fail or request a GC
    result = allocation->wait();
  } while (result == ZPageAllocationStallStartGC);

  {
    //
    // We grab the lock here for two different reasons:
    //
    // 1) Guard deletion of underlying semaphore. This is a workaround for
    // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
    // the semaphore immediately after returning from sem_wait(). The
    // reason is that sem_post() can touch the semaphore after a waiting
    // thread has returned from sem_wait(). To avoid this race we are
    // forcing the waiting thread to acquire/release the lock held by the
    // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
    //
    // 2) Guard the list of satisfied pages.
    //
    ZLocker<ZLock> locker(&_lock);
    _satisfied.remove(allocation);
  }

  // Send event
  event.commit(allocation->type(), allocation->size());

  return (result == ZPageAllocationStallSuccess);
}

bool ZPageAllocator::alloc_page_prepare(ZPageAllocation* allocation) {
  {
    ZLocker<ZLock> locker(&_lock);

    if (alloc_page_common(allocation)) {
      // Success
      return true;
    }

    // Failed
    if (allocation->flags().non_blocking()) {
      // Don't stall
      return false;
    }

    // Enqueue allocation request
    _stalled.insert_last(allocation);
  }

  // Stall
  return alloc_page_stall(allocation);
}

ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
  const size_t size = allocation->size();

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
  if (vmem.is_null()) {
    log_error(gc)("Out of address space");
    return NULL;
  }

  ZPhysicalMemory pmem;
  size_t flushed = 0;

  // Unmap, transfer physical memory, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    flushed += page->size();
    unmap_page(page);
    pmem.transfer_segments(page->physical_memory());
    destroy_page(page);
  }

  if (flushed > 0) {
    // Update statistics
    ZStatInc(ZCounterPageCacheFlush, flushed);
    log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
  }

  // Allocate any remaining physical memory
  if (flushed < size) {
    const size_t remaining = size - flushed;
    _physical.alloc(pmem, remaining);
  }

  // Create new page
  return new ZPage(allocation->type(), vmem, pmem);
}

static bool is_alloc_satisfied(ZPageAllocation* allocation) {
  // The allocation is immediately satisfied if the list of pages contains
  // exactly one page, with the type and size that were requested.
  return allocation->pages()->size() == 1 &&
         allocation->pages()->first()->type() == allocation->type() &&
         allocation->pages()->first()->size() == allocation->size();
}

ZPage* ZPageAllocator::alloc_page_finish(ZPageAllocation* allocation) {
  // Fast path
  if (is_alloc_satisfied(allocation)) {
    return allocation->pages()->remove_first();
  }

  // Slow path
  ZPage* const page = alloc_page_create(allocation);
  if (page == NULL) {
    // Out of address space
    return NULL;
  }

  // Commit page
  if (!commit_page(page)) {
    // Failed or partially failed. Split off any successfully committed
    // part of the page into a new page and insert it into the list of pages,
    // so that it will be re-inserted into the page cache.
    ZPage* const committed_page = page->split_committed();
    if (committed_page != NULL) {
      if (map_page(committed_page)) {
        // Success
        allocation->pages()->insert_last(committed_page);
      } else {
        // Failed
        uncommit_page(committed_page);
        destroy_page(committed_page);
      }
    }

    destroy_page(page);
    return NULL;
  }

  // Map page
  if (!map_page(page)) {
    // Failed
    uncommit_page(page);
    destroy_page(page);
    return NULL;
  }

  // Success
  return page;
}

void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
  size_t freed = 0;

  // Free any allocated pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    freed += page->size();
    free_page(page, false /* reclaimed */);
  }

  ZLocker<ZLock> locker(&_lock);

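  // Note that the pages freed above have already decreased the used counter.
  // What remains is the part of the allocation that never got committed and
  // mapped, i.e. the capacity increase that could not be backed by memory.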
  // Adjust capacity and used to reflect the failed capacity increase
  const size_t remaining = allocation->size() - freed;
  decrease_used(remaining, false /* free */, false /* reclaimed */);
  decrease_capacity(remaining, true /* set_max_capacity */);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
retry:
  ZPageAllocation allocation(type, size, flags);

  // Allocate one or more pages from the page cache. If the allocation
  // succeeds but the returned pages don't cover the complete allocation,
  // then we are allowed to allocate the remaining memory directly from
  // the physical memory manager.
  if (!alloc_page_prepare(&allocation)) {
    // Out of memory
    return NULL;
  }

  ZPage* const page = alloc_page_finish(&allocation);
  if (page == NULL) {
    // Failed to commit or map. Clean up and retry, in the hope that
    // we can still allocate by flushing the page cache (more aggressively).
    alloc_page_failed(&allocation);
    goto retry;
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

void ZPageAllocator::satisfy_stalled() {
  for (;;) {
    ZPageAllocation* const allocation = _stalled.first();
    if (allocation == NULL) {
      // Allocation queue is empty
      return;
    }

    if (!alloc_page_common(allocation)) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy allocation request.
    // Note that we must dequeue the allocation request first, since
    // it will immediately be deallocated once it has been satisfied.
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallSuccess);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), true /* free */, reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try satisfy stalled allocations
  satisfy_stalled();
}

size_t ZPageAllocator::uncommit_inner(uint64_t delay, uint64_t* timeout) {
  // We need to join the suspendible thread set while manipulating capacity and
  // used, to make sure GC safepoints will have a consistent view. However, when
  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
  // we don't change the address good mask after pages have been flushed, and
  // thereby made invisible to pages_do(), but before they have been unmapped.
  SuspendibleThreadSetJoiner joiner(ZVerifyViews);
  ZList<ZPage> pages;
  size_t flushed;

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Never uncommit the reserve, and never uncommit below min capacity. We flush
    // out and uncommit chunks at a time (~0.8% of the max capacity, but at least
    // one granule and at most 256M), in case demand for memory increases while we
    // are uncommitting.
    const size_t retain = clamp(_used + _max_reserve, _min_capacity, _current_max_capacity);
    const size_t release = _capacity - retain;
    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
    const size_t flush = MIN2(release, limit);

    // Flush pages to uncommit
    flushed = _cache.flush_for_uncommit(flush, delay, timeout, &pages);
    if (flushed == 0) {
      // Nothing flushed
      return 0;
    }

    // Adjust used to reflect that these pages are no longer available
    increase_used(flushed, false /* allocation */, false /* relocation */);
  }

  // Unmap, uncommit, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(&pages);
  for (ZPage* page; iter.next(&page);) {
    unmap_page(page);
    uncommit_page(page);
    destroy_page(page);
  }

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Adjust used and capacity to reflect the uncommit
    decrease_used(flushed, false /* free */, false /* reclaimed */);
    decrease_capacity(flushed, false /* set_max_capacity */);
  }

  return flushed;
}

uint64_t ZPageAllocator::uncommit() {
  EventZUncommit event;
  const uint64_t delay = ZUncommitDelay;
  uint64_t timeout = delay;
  size_t uncommitted = 0;

  while (Atomic::load(&_uncommit)) {
    const size_t flushed = uncommit_inner(delay, &timeout);
    if (flushed == 0) {
      // Done
      break;
    }

    uncommitted += flushed;
  }

  if (uncommitted > 0) {
    // Send event
    event.commit(uncommitted);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
    log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)",
                       uncommitted / M, percent_of(uncommitted, _max_capacity));
  }

  log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);

  return timeout;
}

void ZPageAllocator::uncommit_cancel() {
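  // Disable uncommit. This stops the loop in uncommit() and makes
  // uncommit_page() a no-op.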
  Atomic::store(&_uncommit, false);
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::pages_do(ZPageClosure* cl) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

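  // Visit pages handed to satisfied allocation requests, since these pages
  // are no longer in the page cache but have not yet been picked up by the
  // allocating threads.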
  ZListIterator<ZPageAllocation> iter(&_satisfied);
  for (ZPageAllocation* allocation; iter.next(&allocation);) {
    ZListIterator<ZPage> iter(allocation->pages());
    for (ZPage* page; iter.next(&page);) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_stalled.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
    if (allocation->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      allocation->satisfy(ZPageAllocationStallStartGC);
      return;
    }

    // Out of memory, fail allocation request
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallFailed);
  }
}