/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

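// Result of a stalled page allocation: the request was satisfied, it failed
// (out of memory), or the stalled thread should start another GC cycle and
// continue waiting.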
enum ZPageAllocationStall {
  ZPageAllocationStallSuccess,
  ZPageAllocationStallFailed,
  ZPageAllocationStallStartGC
};

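// A page allocation request. Records the requested page type, size and flags,
// and collects the pages flushed from the page cache to satisfy the request.
// Blocking requests that cannot be satisfied immediately are queued on the
// allocator's stalled list, where the requesting thread waits on the embedded
// future until the request is satisfied, fails, or is asked to start a GC.
//
// Rough lifecycle (sketch, see alloc_page() below):
//   ZPageAllocation allocation(type, size, flags);
//   alloc_page_prepare(&allocation);             // may stall on _stall_result
//   ZPage* const page = alloc_page_finish(&allocation);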
class ZPageAllocation : public StackObj {
  friend class ZList<ZPageAllocation>;

private:
  const uint8_t                 _type;
  const size_t                  _size;
  const ZAllocationFlags        _flags;
  const unsigned int            _total_collections;
  ZList<ZPage>                  _pages;
  ZListNode<ZPageAllocation>    _node;
  ZFuture<ZPageAllocationStall> _stall_result;

public:
  ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(is_init_completed() ? ZCollectedHeap::heap()->total_collections() : 0),
      _pages(),
      _node(),
      _stall_result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPageAllocationStall wait() {
    return _stall_result.get();
  }

  ZList<ZPage>* pages() {
    return &_pages;
  }

  void satisfy(ZPageAllocationStall result) {
    _stall_result.set(result);
  }
};

ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _cache(),
    _virtual(max_capacity),
    _physical(max_capacity),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used(0),
    _used_high(0),
    _used_low(0),
    _allocated(0),
    _reclaimed(0),
    _stalled(),
    _satisfied(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  if (ZPageSizeMedium > 0) {
    log_info(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
  } else {
    log_info(gc, init)("Medium Page Size: N/A");
  }
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Check if uncommit should be enabled
  _uncommit = _physical.should_enable_uncommit(min_capacity, max_capacity);

  // Pre-map initial capacity
  if (!prime_cache(workers, initial_capacity)) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // Successfully initialized
  _initialized = true;
}

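// Task for pre-touching memory in parallel. Worker threads claim one granule
// at a time by atomically advancing the shared start offset.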
class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

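// Prime the page cache by allocating a large page covering the initial
// capacity, optionally pre-touching it, and then freeing it again. This
// leaves the initial capacity committed and mapped, and cached for reuse.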
bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
  ZAllocationFlags flags;

  flags.set_non_blocking();
  flags.set_low_address();

  ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
  if (page == NULL) {
    return false;
  }

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_parallel(&task);
  }

  free_page(page, false /* reclaimed */);

  return true;
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
  const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
  return MIN2(soft_max_capacity, current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return Atomic::load(&_capacity);
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return Atomic::load(&_used);
}

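// Unused is the committed capacity that is neither in use nor part of the
// reserve, clamped at zero.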
size_t ZPageAllocator::unused() const {
  const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
  const ssize_t used = (ssize_t)Atomic::load(&_used);
  const ssize_t max_reserve = (ssize_t)_max_reserve;
  const ssize_t unused = capacity - used - max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

size_t ZPageAllocator::increase_capacity(size_t size) {
  const size_t increase = MIN2(size, _current_max_capacity - _capacity);

  // Update atomically since we have concurrent readers
  Atomic::add(&_capacity, increase);

  return increase;
}

void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
  // Update atomically since we have concurrent readers
  Atomic::sub(&_capacity, size);

  if (set_max_capacity) {
    // Adjust current max capacity to avoid further attempts to increase capacity
    log_error(gc)("Forced to lower max Java heap size from "
                  SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                  _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                  _capacity / M, percent_of(_capacity, _max_capacity));

    // Update atomically since we have concurrent readers
    Atomic::store(&_current_max_capacity, _capacity);
  }
}

void ZPageAllocator::increase_used(size_t size, bool allocation, bool relocation) {
  if (allocation) {
    if (relocation) {
      // Allocating a page for the purpose of relocation has a
      // negative contribution to the number of reclaimed bytes.
      _reclaimed -= size;
    }
    _allocated += size;
  }

  // Update atomically since we have concurrent readers
  Atomic::add(&_used, size);

  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool free, bool reclaimed) {
  if (free) {
    // Only pages explicitly released with the reclaimed flag set
    // count as reclaimed bytes. This flag is true when we release
    // a page after relocation, and is false when we release a page
    // to undo an allocation.
    if (reclaimed) {
      _reclaimed += size;
    } else {
      _allocated -= size;
    }
  }

  // Update atomically since we have concurrent readers
  Atomic::sub(&_used, size);

  if (_used < _used_low) {
    _used_low = _used;
  }
}

bool ZPageAllocator::commit_page(ZPage* page) {
  // Commit physical memory
  return _physical.commit(page->physical_memory());
}

void ZPageAllocator::uncommit_page(ZPage* page) {
  // Uncommit physical memory, if uncommit is supported/enabled
  if (_uncommit) {
    _physical.uncommit(page->physical_memory());
  }
}

bool ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  return _physical.map(page->physical_memory(), page->start());
}

void ZPageAllocator::unmap_page(const ZPage* page) const {
  // Unmap physical memory
  _physical.unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::destroy_page(ZPage* page) {
  // Free virtual memory
  _virtual.free(page->virtual_memory());

  // Free physical memory
  _physical.free(page->physical_memory());

  // Delete page safely
  _safe_delete(page);
}

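// Check if an allocation of the given size fits within the current max
// capacity, optionally excluding the reserve. Note that this differs from
// is_alloc_allowed_from_cache() below, which checks against the currently
// committed capacity.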
bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
  size_t available = _current_max_capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available >= size;
}

bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
  size_t available = _capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  } else if (_capacity != _current_max_capacity) {
    // Always increase capacity before using the reserve
    return false;
  }

  return available >= size;
}

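// Allocation strategy: first try to take a page of the requested type and
// size from the page cache, then try to increase capacity. If capacity
// cannot be increased enough to cover the request, flush cached pages to
// cover the remainder.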
bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
  if (!is_alloc_allowed(size, no_reserve)) {
    // Out of memory
    return false;
  }

  // Try allocate from the page cache
  if (is_alloc_allowed_from_cache(size, no_reserve)) {
    ZPage* const page = _cache.alloc_page(type, size);
    if (page != NULL) {
      // Success
      pages->insert_last(page);
      return true;
    }
  }

  // Try increase capacity
  const size_t increased = increase_capacity(size);
  if (increased < size) {
    // Could not increase capacity enough to satisfy the allocation
    // completely. Flush the page cache to satisfy the remainder.
    const size_t remaining = size - increased;
    _cache.flush_for_allocation(remaining, pages);
  }

  // Success
  return true;
}

bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
  EventZPageAllocation event;
  const uint8_t type = allocation->type();
  const size_t size = allocation->size();
  const ZAllocationFlags flags = allocation->flags();
  ZList<ZPage>* const pages = allocation->pages();

  // Try allocate without using the reserve
  if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
    // If allowed to, try allocate using the reserve
    if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
      // Out of memory
      return false;
    }
  }

  // Update used statistics
  increase_used(size, true /* allocation */, flags.relocation());

  // Send event
  event.commit(type, size, flags.non_blocking(), flags.no_reserve(),
               _used, _current_max_capacity - _used, _capacity - _used);

  // Success
  return true;
}

static void check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
  ZStatTimer timer(ZCriticalPhaseAllocationStall);
  EventZAllocationStall event;
  ZPageAllocationStall result;

  // We can only block if the VM is fully initialized
  check_out_of_memory_during_initialization();

  do {
    // Start asynchronous GC
    ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

    // Wait for allocation to complete, fail or request a GC
    result = allocation->wait();
  } while (result == ZPageAllocationStallStartGC);

  {
    //
    // We grab the lock here for two different reasons:
    //
    // 1) Guard deletion of underlying semaphore. This is a workaround for
    // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
    // the semaphore immediately after returning from sem_wait(). The
    // reason is that sem_post() can touch the semaphore after a waiting
    // thread has returned from sem_wait(). To avoid this race we are
    // forcing the waiting thread to acquire/release the lock held by the
    // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
    //
    // 2) Guard the list of satisfied pages.
    //
    ZLocker<ZLock> locker(&_lock);
    _satisfied.remove(allocation);
  }

  // Send event
  event.commit(allocation->type(), allocation->size());

  return (result == ZPageAllocationStallSuccess);
}

bool ZPageAllocator::alloc_page_prepare(ZPageAllocation* allocation) {
  {
    ZLocker<ZLock> locker(&_lock);

    if (alloc_page_common(allocation)) {
      // Success
      return true;
    }

    // Failed
    if (allocation->flags().non_blocking()) {
      // Don't stall
      return false;
    }

    // Enqueue allocation request
    _stalled.insert_last(allocation);
  }

  // Stall
  return alloc_page_stall(allocation);
}

ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
  const size_t size = allocation->size();

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
  if (vmem.is_null()) {
    log_error(gc)("Out of address space");
    return NULL;
  }

  ZPhysicalMemory pmem;
  size_t flushed = 0;

  // Unmap, transfer physical memory, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    flushed += page->size();
    unmap_page(page);
    pmem.transfer_segments(page->physical_memory());
    destroy_page(page);
  }

  if (flushed > 0) {
    // Update statistics
    ZStatInc(ZCounterPageCacheFlush, flushed);
    log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
  }

  // Allocate any remaining physical memory
  if (flushed < size) {
    const size_t remaining = size - flushed;
    _physical.alloc(pmem, remaining);
  }

  // Create new page
  return new ZPage(allocation->type(), vmem, pmem);
}

static bool is_alloc_satisfied(ZPageAllocation* allocation) {
  // The allocation is immediately satisfied if the list of pages contains
  // exactly one page, with the type and size that was requested.
  return allocation->pages()->size() == 1 &&
         allocation->pages()->first()->type() == allocation->type() &&
         allocation->pages()->first()->size() == allocation->size();
}

ZPage* ZPageAllocator::alloc_page_finish(ZPageAllocation* allocation) {
  // Fast path
  if (is_alloc_satisfied(allocation)) {
    return allocation->pages()->remove_first();
  }

  // Slow path
  ZPage* const page = alloc_page_create(allocation);
  if (page == NULL) {
    // Out of address space
    return NULL;
  }

  // Commit page
  if (!commit_page(page)) {
    // Failed or partially failed. Split off any successfully committed
    // part of the page into a new page and insert it into the list of pages,
    // so that it will be re-inserted into the page cache.
    ZPage* const committed_page = page->split_committed();
    if (committed_page != NULL) {
      if (map_page(committed_page)) {
        // Success
        allocation->pages()->insert_last(committed_page);
      } else {
        // Failed
        uncommit_page(committed_page);
        destroy_page(committed_page);
      }
    }

    destroy_page(page);
    return NULL;
  }

  // Map page
  if (!map_page(page)) {
    // Failed
    uncommit_page(page);
    destroy_page(page);
    return NULL;
  }

  // Success
  return page;
}

void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
  size_t freed = 0;

  // Free any allocated pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    freed += page->size();
    free_page(page, false /* reclaimed */);
  }

  ZLocker<ZLock> locker(&_lock);

  // Adjust capacity and used to reflect the failed capacity increase
  const size_t remaining = allocation->size() - freed;
  decrease_used(remaining, false /* free */, false /* reclaimed */);
  decrease_capacity(remaining, true /* set_max_capacity */);
}

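// Allocate a page of the given type and size. May block unless the
// non-blocking flag is set. If committing or mapping the new page fails,
// the partial allocation is undone and the allocation is retried.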
ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
retry:
  ZPageAllocation allocation(type, size, flags);

  // Allocate one or more pages from the page cache. If the allocation
  // succeeds but the returned pages don't cover the complete allocation,
  // then we are allowed to allocate the remaining memory directly from
  // the physical memory manager.
  if (!alloc_page_prepare(&allocation)) {
    // Out of memory
    return NULL;
  }

  ZPage* const page = alloc_page_finish(&allocation);
  if (page == NULL) {
    // Failed to commit or map. Clean up and retry, in the hope that
    // we can still allocate by flushing the page cache (more aggressively).
    alloc_page_failed(&allocation);
    goto retry;
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

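// Satisfy stalled allocation requests in FIFO order, for as long as they can
// be satisfied. Called with the allocator lock held.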
void ZPageAllocator::satisfy_stalled() {
  for (;;) {
    ZPageAllocation* const allocation = _stalled.first();
    if (allocation == NULL) {
      // Allocation queue is empty
      return;
    }

    if (!alloc_page_common(allocation)) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy allocation request.
    // Note that we must dequeue the allocation request first, since
    // it will immediately be deallocated once it has been satisfied.
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallSuccess);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), true /* free */, reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try satisfy stalled allocations
  satisfy_stalled();
}

size_t ZPageAllocator::uncommit_inner(uint64_t delay, uint64_t* timeout) {
  // We need to join the suspendible thread set while manipulating capacity and
  // used, to make sure GC safepoints will have a consistent view. However, when
  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
  // we don't change the address good mask after pages have been flushed, and
  // thereby made invisible to pages_do(), but before they have been unmapped.
  SuspendibleThreadSetJoiner joiner(ZVerifyViews);
  ZList<ZPage> pages;
  size_t flushed;

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Never uncommit the reserve, and never uncommit below min capacity. We flush
    // out and uncommit chunks at a time (~0.8% of the max capacity, but at least
    // one granule and at most 256M), in case demand for memory increases while we
    // are uncommitting.
    const size_t retain = clamp(_used + _max_reserve, _min_capacity, _current_max_capacity);
    const size_t release = _capacity - retain;
    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
    const size_t flush = MIN2(release, limit);

    // Flush pages to uncommit
    flushed = _cache.flush_for_uncommit(flush, delay, timeout, &pages);
    if (flushed == 0) {
      // Nothing flushed
      return 0;
    }

    // Adjust used to reflect that these pages are no longer available
    increase_used(flushed, false /* allocation */, false /* relocation */);
  }

  // Unmap, uncommit, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(&pages);
  for (ZPage* page; iter.next(&page);) {
    unmap_page(page);
    uncommit_page(page);
    destroy_page(page);
  }

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Adjust used and capacity to reflect the uncommit
    decrease_used(flushed, false /* free */, false /* reclaimed */);
    decrease_capacity(flushed, false /* set_max_capacity */);
  }

  return flushed;
}

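// Uncommit memory in chunks until the page cache has nothing more to offer,
// or until uncommit is canceled. Returns the timeout (in seconds) until
// uncommit should be attempted again.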
uint64_t ZPageAllocator::uncommit() {
  EventZUncommit event;
  const uint64_t delay = ZUncommitDelay;
  uint64_t timeout = delay;
  size_t uncommitted = 0;

  while (Atomic::load(&_uncommit)) {
    const size_t flushed = uncommit_inner(delay, &timeout);
    if (flushed == 0) {
      // Done
      break;
    }

    uncommitted += flushed;
  }

  if (uncommitted > 0) {
    // Send event
    event.commit(uncommitted);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
    log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)",
                       uncommitted / M, percent_of(uncommitted, _max_capacity));
  }

  log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);

  return timeout;
}

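// Cancel any ongoing uncommit operation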
void ZPageAllocator::uncommit_cancel() {
  Atomic::store(&_uncommit, false);
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->physical_memory(), page->start());
}

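// Apply the closure to pages held by satisfied (but not yet picked up)
// allocation requests, and to pages in the page cache.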
void ZPageAllocator::pages_do(ZPageClosure* cl) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZListIterator<ZPageAllocation> iter(&_satisfied);
  for (ZPageAllocation* allocation; iter.next(&allocation);) {
    ZListIterator<ZPage> iter(allocation->pages());
    for (ZPage* page; iter.next(&page);) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_stalled.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
    if (allocation->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      allocation->satisfy(ZPageAllocationStallStartGC);
      return;
    }

    // Out of memory, fail allocation request
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallFailed);
  }
}