/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

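// Result of a stalled page allocation, delivered through the allocation's
// ZFuture once the allocator either satisfies the request, fails it, or
// asks the stalled thread to start another GC cycle and wait again.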
enum ZPageAllocationStall {
  ZPageAllocationStallSuccess,
  ZPageAllocationStallFailed,
  ZPageAllocationStallStartGC
};

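// Bookkeeping for a single page allocation request. The object lives on the
// requesting thread's stack (StackObj) and can be linked into the allocator's
// _stalled and _satisfied lists while a blocking request waits for memory and
// is handed back its result.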
class ZPageAllocation : public StackObj {
  friend class ZList<ZPageAllocation>;

private:
  const uint8_t                 _type;
  const size_t                  _size;
  const ZAllocationFlags        _flags;
  const uint32_t                _seqnum;
  ZList<ZPage>                  _pages;
  ZListNode<ZPageAllocation>    _node;
  ZFuture<ZPageAllocationStall> _stall_result;

public:
  ZPageAllocation(uint8_t type, size_t size, ZAllocationFlags flags) :
      _type(type),
      _size(size),
      _flags(flags),
      _seqnum(ZGlobalSeqNum),
      _pages(),
      _node(),
      _stall_result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  uint32_t seqnum() const {
    return _seqnum;
  }

  ZPageAllocationStall wait() {
    return _stall_result.get();
  }

  ZList<ZPage>* pages() {
    return &_pages;
  }

  void satisfy(ZPageAllocationStall result) {
    _stall_result.set(result);
  }
};

ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _cache(),
    _virtual(max_capacity),
    _physical(max_capacity),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _claimed(0),
    _used(0),
    _used_high(0),
    _used_low(0),
    _allocated(0),
    _reclaimed(0),
    _stalled(),
    _satisfied(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  if (ZPageSizeMedium > 0) {
    log_info(gc, init)("Medium Page Size: " SIZE_FORMAT "M", ZPageSizeMedium / M);
  } else {
    log_info(gc, init)("Medium Page Size: N/A");
  }
  log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Check if uncommit should be enabled
  _uncommit = _physical.should_enable_uncommit(min_capacity, max_capacity);

  // Pre-map initial capacity
  if (!prime_cache(workers, initial_capacity)) {
    log_error(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // Successfully initialized
  _initialized = true;
}

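// Parallel pre-touch task. Worker threads claim one ZGranuleSize-sized chunk
// at a time by atomically advancing _start, so the range [_start, _end) gets
// pre-touched by all workers without any further synchronization.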
class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

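// Prime the page cache by allocating, optionally pre-touching, and then
// immediately freeing a single large page covering the initial capacity.
// This leaves the initial capacity committed, mapped and sitting in the
// page cache, ready to be handed out by future allocations.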
bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
  ZAllocationFlags flags;

  flags.set_non_blocking();
  flags.set_low_address();

  ZPage* const page = alloc_page(ZPageTypeLarge, size, flags);
  if (page == NULL) {
    return false;
  }

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_parallel(&task);
  }

  free_page(page, false /* reclaimed */);

  return true;
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize);
  const size_t current_max_capacity = Atomic::load(&_current_max_capacity);
  return MIN2(soft_max_capacity, current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return Atomic::load(&_capacity);
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return Atomic::load(&_used);
}

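// Unused memory is the committed capacity minus used, claimed and reserved
// memory. The counters are loaded individually without holding the allocator
// lock, so the intermediate result can be transiently negative and is
// therefore computed with signed arithmetic and clamped at zero.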
size_t ZPageAllocator::unused() const {
  const ssize_t capacity = (ssize_t)Atomic::load(&_capacity);
  const ssize_t used = (ssize_t)Atomic::load(&_used);
  const ssize_t claimed = (ssize_t)Atomic::load(&_claimed);
  const ssize_t max_reserve = (ssize_t)_max_reserve;
  const ssize_t unused = capacity - used - claimed - max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

size_t ZPageAllocator::increase_capacity(size_t size) {
  const size_t increase = MIN2(size, _current_max_capacity - _capacity);

  // Update atomically since we have concurrent readers
  Atomic::add(&_capacity, increase);

  return increase;
}

void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) {
  // Update atomically since we have concurrent readers
  Atomic::sub(&_capacity, size);

  if (set_max_capacity) {
    // Adjust current max capacity to avoid further attempts to increase capacity
    log_error(gc)("Forced to lower max Java heap size from "
                  SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                  _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                  _capacity / M, percent_of(_capacity, _max_capacity));

    // Update atomically since we have concurrent readers
    Atomic::store(&_current_max_capacity, _capacity);
  }
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::add(&_used, size);
  if (used > _used_high) {
    _used_high = used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  // Only pages explicitly released with the reclaimed flag set
  // count as reclaimed bytes. This flag is true when we release
  // a page after relocation, and is false when we release a page
  // to undo an allocation.
  if (reclaimed) {
    _reclaimed += size;
  } else {
    _allocated -= size;
  }

  // Update atomically since we have concurrent readers
  const size_t used = Atomic::sub(&_used, size);
  if (used < _used_low) {
    _used_low = used;
  }
}

bool ZPageAllocator::commit_page(ZPage* page) {
  // Commit physical memory
  return _physical.commit(page->physical_memory());
}

void ZPageAllocator::uncommit_page(ZPage* page) {
  if (!_uncommit) {
    // Uncommit not supported/enabled
    return;
  }

  // Uncommit physical memory
  _physical.uncommit(page->physical_memory());
}

bool ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  return _physical.map(page->physical_memory(), page->start());
}

void ZPageAllocator::unmap_page(const ZPage* page) const {
  // Unmap physical memory
  _physical.unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::destroy_page(ZPage* page) {
  // Free virtual memory
  _virtual.free(page->virtual_memory());

  // Free physical memory
  _physical.free(page->physical_memory());

  // Delete page safely
  _safe_delete(page);
}

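// An allocation fits the heap at all if it fits within the current max
// capacity (is_alloc_allowed), and can be served from already committed
// memory if it also fits within the current capacity
// (is_alloc_allowed_from_cache). In both cases the reserve is excluded
// when the request is not allowed to use it.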
bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
  size_t available = _current_max_capacity - _used - _claimed;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available >= size;
}

bool ZPageAllocator::is_alloc_allowed_from_cache(size_t size, bool no_reserve) const {
  size_t available = _capacity - _used - _claimed;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  } else if (_capacity != _current_max_capacity) {
    // Always increase capacity before using the reserve
    return false;
  }

  return available >= size;
}

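// A single allocation attempt tries, in order: the page cache (when allowed
// by the capacity accounting above), then a capacity increase, and finally
// flushing cached pages to cover whatever part of the request the capacity
// increase could not satisfy.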
bool ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve, ZList<ZPage>* pages) {
  if (!is_alloc_allowed(size, no_reserve)) {
    // Out of memory
    return false;
  }

  // Try to allocate from the page cache
  if (is_alloc_allowed_from_cache(size, no_reserve)) {
    ZPage* const page = _cache.alloc_page(type, size);
    if (page != NULL) {
      // Success
      pages->insert_last(page);
      return true;
    }
  }

  // Try to increase capacity
  const size_t increased = increase_capacity(size);
  if (increased < size) {
    // Could not increase capacity enough to satisfy the allocation
    // completely. Flush the page cache to satisfy the remainder.
    const size_t remaining = size - increased;
    _cache.flush_for_allocation(remaining, pages);
  }

  // Success
  return true;
}

bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) {
  EventZPageAllocation event;
  const uint8_t type = allocation->type();
  const size_t size = allocation->size();
  const ZAllocationFlags flags = allocation->flags();
  ZList<ZPage>* const pages = allocation->pages();

  // Try to allocate without using the reserve
  if (!alloc_page_common_inner(type, size, true /* no_reserve */, pages)) {
    // If allowed to, try to allocate using the reserve
    if (flags.no_reserve() || !alloc_page_common_inner(type, size, false /* no_reserve */, pages)) {
      // Out of memory
      return false;
    }
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send event
  event.commit(type, size, flags.non_blocking(), flags.no_reserve(), _used,
               _current_max_capacity - _used - _claimed, _capacity - _used - _claimed);

  // Success
  return true;
}

static void check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
  ZStatTimer timer(ZCriticalPhaseAllocationStall);
  EventZAllocationStall event;
  ZPageAllocationStall result;

  // We can only block if the VM is fully initialized
  check_out_of_memory_during_initialization();

  do {
    // Start asynchronous GC
    ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

    // Wait for allocation to complete, fail or request a GC
    result = allocation->wait();
  } while (result == ZPageAllocationStallStartGC);

  {
    //
    // We grab the lock here for two different reasons:
    //
    // 1) Guard deletion of underlying semaphore. This is a workaround for
    // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
    // the semaphore immediately after returning from sem_wait(). The
    // reason is that sem_post() can touch the semaphore after a waiting
    // thread has returned from sem_wait(). To avoid this race we are
    // forcing the waiting thread to acquire/release the lock held by the
    // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
    //
    // 2) Guard the list of satisfied pages.
    //
    ZLocker<ZLock> locker(&_lock);
    _satisfied.remove(allocation);
  }

  // Send event
  event.commit(allocation->type(), allocation->size());

  return (result == ZPageAllocationStallSuccess);
}

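// Stage 0: acquire memory for the allocation request, either immediately or,
// for blocking requests, by enqueuing the request and stalling the caller
// until a GC has made enough memory available (or the request fails).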
bool ZPageAllocator::alloc_page_stage0(ZPageAllocation* allocation) {
  {
    ZLocker<ZLock> locker(&_lock);

    if (alloc_page_common(allocation)) {
      // Success
      return true;
    }

    // Failed
    if (allocation->flags().non_blocking()) {
      // Don't stall
      return false;
    }

    // Enqueue allocation request
    _stalled.insert_last(allocation);
  }

  // Stall
  return alloc_page_stall(allocation);
}

ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
  const size_t size = allocation->size();

  // Allocate virtual memory. To make error handling a lot more straightforward,
  // we allocate virtual memory before destroying flushed pages.
  const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
  if (vmem.is_null()) {
    log_error(gc)("Out of address space");
    return NULL;
  }

  ZPhysicalMemory pmem;
  size_t flushed = 0;

  // Unmap, transfer physical memory, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    flushed += page->size();
    unmap_page(page);
    pmem.transfer_segments(page->physical_memory());
    destroy_page(page);
  }

  if (flushed > 0) {
    // Update statistics
    ZStatInc(ZCounterPageCacheFlush, flushed);
    log_debug(gc, heap)("Page Cache Flushed: " SIZE_FORMAT "M", flushed / M);
  }

  // Allocate any remaining physical memory
  if (flushed < size) {
    const size_t remaining = size - flushed;
    _physical.alloc(pmem, remaining);
  }

  // Create new page
  return new ZPage(allocation->type(), vmem, pmem);
}

static bool is_alloc_satisfied(ZPageAllocation* allocation) {
  // The allocation is immediately satisfied if the list of pages contains
  // exactly one page, with the type and size that was requested.
  return allocation->pages()->size() == 1 &&
         allocation->pages()->first()->type() == allocation->type() &&
         allocation->pages()->first()->size() == allocation->size();
}

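// Stage 1: turn the memory gathered in stage 0 into a single page. In the
// common case the page cache already provided exactly the page that was
// asked for; otherwise a new page is created from the flushed pages and any
// additional physical memory, then committed and mapped.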
ZPage* ZPageAllocator::alloc_page_stage1(ZPageAllocation* allocation) {
  // Fast path
  if (is_alloc_satisfied(allocation)) {
    return allocation->pages()->remove_first();
  }

  // Slow path
  ZPage* const page = alloc_page_create(allocation);
  if (page == NULL) {
    // Out of address space
    return NULL;
  }

  // Commit page
  if (!commit_page(page)) {
    // Failed or partially failed. Split off any successfully committed
    // part of the page into a new page and insert it into the list of pages,
    // so that it will be re-inserted into the page cache.
    ZPage* const committed_page = page->split_committed();
    if (committed_page != NULL) {
      if (map_page(committed_page)) {
        // Success
        allocation->pages()->insert_last(committed_page);
      } else {
        // Failed
        uncommit_page(committed_page);
        destroy_page(committed_page);
      }
    }

    destroy_page(page);
    return NULL;
  }

  // Map page
  if (!map_page(page)) {
    // Failed
    uncommit_page(page);
    destroy_page(page);
    return NULL;
  }

  // Success
  return page;
}

void ZPageAllocator::alloc_page_failed(ZPageAllocation* allocation) {
  size_t freed = 0;

  // Free any allocated pages
  ZListRemoveIterator<ZPage> iter(allocation->pages());
  for (ZPage* page; iter.next(&page);) {
    freed += page->size();
    free_page(page, false /* reclaimed */);
  }

  ZLocker<ZLock> locker(&_lock);

  // Adjust capacity and used to reflect the failed capacity increase
  const size_t remaining = allocation->size() - freed;
  decrease_used(remaining, false /* reclaimed */);
  decrease_capacity(remaining, true /* set_max_capacity */);
}

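// Illustrative sketch (not part of this file) of how a caller, such as ZHeap,
// is expected to use the allocator. ZPageTypeSmall and ZPageSizeSmall are the
// small-page constants from zGlobals.hpp, and default-constructed flags give
// a blocking allocation that may use the reserve:
//
//   ZAllocationFlags flags;
//   ZPage* const page = allocator->alloc_page(ZPageTypeSmall, ZPageSizeSmall, flags);
//   if (page != NULL) {
//     // ... use the page, then eventually ...
//     allocator->free_page(page, false /* reclaimed */);
//   }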
ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
retry:
  ZPageAllocation allocation(type, size, flags);

  // Allocate one or more pages from the page cache. If the allocation
  // succeeds but the returned pages don't cover the complete allocation,
  // then we are allowed to allocate the remaining memory directly from
  // the physical memory manager.
  if (!alloc_page_stage0(&allocation)) {
    // Out of memory
    return NULL;
  }

  ZPage* const page = alloc_page_stage1(&allocation);
  if (page == NULL) {
    // Failed to commit or map. Clean up and retry, in the hope that
    // we can still allocate by flushing the page cache (more aggressively).
    alloc_page_failed(&allocation);
    goto retry;
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

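// Retry stalled allocation requests in FIFO order. The loop stops at the
// first request that still cannot be satisfied, so requests behind it stay
// queued until more memory is freed.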
void ZPageAllocator::satisfy_stalled() {
  for (;;) {
    ZPageAllocation* const allocation = _stalled.first();
    if (allocation == NULL) {
      // Allocation queue is empty
      return;
    }

    if (!alloc_page_common(allocation)) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy allocation request.
    // Note that we must dequeue the allocation request first, since
    // it will immediately be deallocated once it has been satisfied.
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallSuccess);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try to satisfy stalled allocations
  satisfy_stalled();
}

size_t ZPageAllocator::uncommit_inner(uint64_t delay, uint64_t* timeout) {
  // We need to join the suspendible thread set while manipulating capacity and
  // used, to make sure GC safepoints will have a consistent view. However, when
  // ZVerifyViews is enabled we need to join at a broader scope to also make sure
  // we don't change the address good mask after pages have been flushed, and
  // thereby made invisible to pages_do(), but before they have been unmapped.
  SuspendibleThreadSetJoiner joiner(ZVerifyViews);
  ZList<ZPage> pages;
  size_t flushed;

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Never uncommit the reserve, and never uncommit below min capacity. We flush
    // out and uncommit chunks at a time (~0.8% of the max capacity, but at least
    // one granule and at most 256M), in case demand for memory increases while we
    // are uncommitting.
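    // For example, with a 16G max capacity at most 16G / 128 = 128M is flushed
    // per call, while max capacities of 32G and above are capped at 256M per call.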
    const size_t retain = clamp(_used + _max_reserve, _min_capacity, _capacity);
    const size_t release = _capacity - retain;
    const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M);
    const size_t flush = MIN2(release, limit);

    // Flush pages to uncommit
    flushed = _cache.flush_for_uncommit(flush, delay, timeout, &pages);
    if (flushed == 0) {
      // Nothing flushed
      return 0;
    }

    // Record flushed pages as claimed
    Atomic::add(&_claimed, flushed);
  }

  // Unmap, uncommit, and destroy flushed pages
  ZListRemoveIterator<ZPage> iter(&pages);
  for (ZPage* page; iter.next(&page);) {
    unmap_page(page);
    uncommit_page(page);
    destroy_page(page);
  }

  {
    SuspendibleThreadSetJoiner joiner(!ZVerifyViews);
    ZLocker<ZLock> locker(&_lock);

    // Adjust claimed and capacity to reflect the uncommit
    Atomic::sub(&_claimed, flushed);
    decrease_capacity(flushed, false /* set_max_capacity */);
  }

  return flushed;
}

uint64_t ZPageAllocator::uncommit() {
  EventZUncommit event;
  const uint64_t delay = ZUncommitDelay;
  uint64_t timeout = delay;
  size_t uncommitted = 0;

  while (Atomic::load(&_uncommit)) {
    const size_t flushed = uncommit_inner(delay, &timeout);
    if (flushed == 0) {
      // Done
      break;
    }

    uncommitted += flushed;
  }

  if (uncommitted > 0) {
    // Send event
    event.commit(uncommitted);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
    log_info(gc, heap)("Uncommitted: " SIZE_FORMAT "M(%.0f%%)",
                       uncommitted / M, percent_of(uncommitted, _max_capacity));
  }

  log_trace(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);

  return timeout;
}

void ZPageAllocator::uncommit_cancel() {
  Atomic::store(&_uncommit, false);
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::pages_do(ZPageClosure* cl) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZListIterator<ZPageAllocation> iter(&_satisfied);
  for (ZPageAllocation* allocation; iter.next(&allocation);) {
    ZListIterator<ZPage> pages_iter(allocation->pages());
    for (ZPage* page; pages_iter.next(&page);) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_stalled.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocation* allocation = _stalled.first(); allocation != NULL; allocation = _stalled.first()) {
    if (allocation->seqnum() == ZGlobalSeqNum) {
      // Start a new GC cycle, keep allocation requests enqueued
      allocation->satisfy(ZPageAllocationStallStartGC);
      return;
    }

    // Out of memory, fail allocation request
    _stalled.remove(allocation);
    _satisfied.insert_last(allocation);
    allocation->satisfy(ZPageAllocationStallFailed);
  }
}