/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zSafeDelete.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "jfr/jfrEvents.hpp"
#include "runtime/globals.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/debug.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond);
static const ZStatCounter       ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;
  const size_t                 _size;
  const ZAllocationFlags       _flags;
  const unsigned int           _total_collections;
  ZListNode<ZPageAllocRequest> _node;
  ZFuture<ZPage*>              _result;

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections),
      _node(),
      _result() {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPage* peek() {
    return _result.peek();
  }

  ZPage* wait() {
    return _result.get();
  }

  void satisfy(ZPage* page) {
    _result.set(page);
  }
};

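// Sentinel page pointer used to satisfy stalled allocation requests, signaling
// that a new GC cycle should be started and the allocation retried.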
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(ZWorkers* workers,
                               size_t min_capacity,
                               size_t initial_capacity,
                               size_t max_capacity,
                               size_t max_reserve) :
    _lock(),
    _virtual(max_capacity),
    _physical(),
    _cache(),
    _min_capacity(min_capacity),
    _max_capacity(max_capacity),
    _max_reserve(max_reserve),
    _current_max_capacity(max_capacity),
    _capacity(0),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _satisfied(),
    _safe_delete(),
    _uncommit(false),
    _initialized(false) {

  if (!_virtual.is_initialized() || !_physical.is_initialized()) {
    return;
  }

  log_info_p(gc, init)("Min Capacity: " SIZE_FORMAT "M", min_capacity / M);
  log_info_p(gc, init)("Initial Capacity: " SIZE_FORMAT "M", initial_capacity / M);
  log_info_p(gc, init)("Max Capacity: " SIZE_FORMAT "M", max_capacity / M);
  log_info_p(gc, init)("Max Reserve: " SIZE_FORMAT "M", max_reserve / M);
  log_info_p(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");

  // Warn if system limits could stop us from reaching max capacity
  _physical.warn_commit_limits(max_capacity);

  // Commit initial capacity
  _capacity = _physical.commit(initial_capacity);
  if (_capacity != initial_capacity) {
    log_error_p(gc)("Failed to allocate initial Java heap (" SIZE_FORMAT "M)", initial_capacity / M);
    return;
  }

  // If uncommit is not explicitly disabled, max capacity is greater than
  // min capacity, and uncommit is supported by the platform, then we will
  // try to uncommit unused memory.
  _uncommit = ZUncommit && (max_capacity > min_capacity) && _physical.supports_uncommit();
  if (_uncommit) {
    log_info(gc, init)("Uncommit: Enabled, Delay: " UINTX_FORMAT "s", ZUncommitDelay);
  } else {
    log_info(gc, init)("Uncommit: Disabled");
  }

  // Pre-map initial capacity
  prime_cache(workers, initial_capacity);

  // Successfully initialized
  _initialized = true;
}

class ZPreTouchTask : public ZTask {
private:
  const ZPhysicalMemoryManager* const _physical;
  volatile uintptr_t                  _start;
  const uintptr_t                     _end;

public:
  ZPreTouchTask(const ZPhysicalMemoryManager* physical, uintptr_t start, uintptr_t end) :
      ZTask("ZPreTouchTask"),
      _physical(physical),
      _start(start),
      _end(end) {}

  virtual void work() {
    for (;;) {
      // Get granule offset
      const size_t size = ZGranuleSize;
      const uintptr_t offset = Atomic::fetch_and_add(&_start, size);
      if (offset >= _end) {
        // Done
        break;
      }

      // Pre-touch granule
      _physical->pretouch(offset, size);
    }
  }
};

void ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  guarantee(!pmem.is_null(), "Invalid size");

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size, true /* alloc_from_front */);
  guarantee(!vmem.is_null(), "Invalid size");

  // Allocate page
  ZPage* const page = new ZPage(vmem, pmem);

  // Map page
  map_page(page);
  page->set_pre_mapped();

  if (AlwaysPreTouch) {
    // Pre-touch page
    ZPreTouchTask task(&_physical, page->start(), page->end());
    workers->run_parallel(&task);
  }

  // Add page to cache
  page->set_last_used();
  _cache.free_page(page);
}

bool ZPageAllocator::is_initialized() const {
  return _initialized;
}

size_t ZPageAllocator::min_capacity() const {
  return _min_capacity;
}

size_t ZPageAllocator::max_capacity() const {
  return _max_capacity;
}

size_t ZPageAllocator::soft_max_capacity() const {
  // Note that SoftMaxHeapSize is a manageable flag
  return MIN2(SoftMaxHeapSize, _current_max_capacity);
}

size_t ZPageAllocator::capacity() const {
  return _capacity;
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return _used;
}

size_t ZPageAllocator::unused() const {
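  // The reserve is not considered unused. Clamp at zero, since used
  // plus the reserve can exceed the current capacity.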
  const ssize_t unused = (ssize_t)_capacity - (ssize_t)_used - (ssize_t)_max_reserve;
  return unused > 0 ? (size_t)unused : 0;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
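  // _reclaimed can be negative, since page allocations for relocation
  // count as negative contributions. Clamp at zero.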
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;
  _used += size;
  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  // Only pages explicitly released with the reclaimed flag set
  // count as reclaimed bytes. This flag is true when we release
  // a page after relocation, and is false when we release a page
  // to undo an allocation.
  if (reclaimed) {
    _reclaimed += size;
  } else {
    _allocated -= size;
  }
  _used -= size;
  if (_used < _used_low) {
    _used_low = _used;
  }
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    return NULL;
  }

  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  assert(!pmem.is_null(), "Invalid size");

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

void ZPageAllocator::destroy_page(ZPage* page) {
  const ZVirtualMemory& vmem = page->virtual_memory();
  const ZPhysicalMemory& pmem = page->physical_memory();

  // Unmap memory
  _physical.unmap(pmem, vmem.start());

  // Free physical memory
  _physical.free(pmem);

  // Free virtual memory
  _virtual.free(vmem);

  // Delete page safely
  _safe_delete(page);
}

void ZPageAllocator::map_page(const ZPage* page) const {
  // Map physical memory
  _physical.map(page->physical_memory(), page->start());
}

size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = _current_max_capacity - _used;

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, _max_reserve);
  }

  return available;
}

bool ZPageAllocator::ensure_available(size_t size, bool no_reserve) {
  if (max_available(no_reserve) < size) {
    // Not enough free memory
    return false;
  }

  // We add the max_reserve to the requested size to avoid losing
  // the reserve because of failure to increase capacity before
  // reaching max capacity.
  size += _max_reserve;

  // Don't try to increase capacity if enough unused capacity
  // is available or if current max capacity has been reached.
  const size_t available = _capacity - _used;
  if (available < size && _capacity < _current_max_capacity) {
    // Try to increase capacity
    const size_t commit = MIN2(size - available, _current_max_capacity - _capacity);
    const size_t committed = _physical.commit(commit);
    _capacity += committed;

    log_trace(gc, heap)("Make Available: Size: " SIZE_FORMAT "M, NoReserve: %s, "
                        "Available: " SIZE_FORMAT "M, Commit: " SIZE_FORMAT "M, "
                        "Committed: " SIZE_FORMAT "M, Capacity: " SIZE_FORMAT "M",
                        size / M, no_reserve ? "True" : "False", available / M,
                        commit / M, committed / M, _capacity / M);

    if (committed != commit) {
      // Failed, or partly failed, to increase capacity. Adjust current
      // max capacity to avoid further attempts to increase capacity.
      log_error_p(gc)("Forced to lower max Java heap size from "
                      SIZE_FORMAT "M(%.0f%%) to " SIZE_FORMAT "M(%.0f%%)",
                      _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity),
                      _capacity / M, percent_of(_capacity, _max_capacity));

      _current_max_capacity = _capacity;
    }
  }

  if (!no_reserve) {
    size -= _max_reserve;
  }

  const size_t new_available = _capacity - _used;
  return new_available >= size;
}

void ZPageAllocator::ensure_uncached_available(size_t size) {
  assert(_capacity - _used >= size, "Invalid size");
  const size_t uncached_available = _capacity - _used - _cache.available();
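  // Flush the shortfall from the page cache, making that memory
  // available for allocating a new page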
  if (size > uncached_available) {
    flush_cache_for_allocation(size - uncached_available);
  }
}

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, bool no_reserve) {
  if (!ensure_available(size, no_reserve)) {
    // Not enough free memory
    return NULL;
  }

  // Try allocate page from the cache
  ZPage* const page = _cache.alloc_page(type, size);
  if (page != NULL) {
    return page;
  }

  // Try flush pages from the cache
  ensure_uncached_available(size);

  // Create new page
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  EventZPageAllocation event;

  ZPage* const page = alloc_page_common_inner(type, size, flags.no_reserve());
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  event.commit(type, size, _used, max_available(flags.no_reserve()),
               _cache.available(), flags.non_blocking(), flags.no_reserve());

  return page;
}

void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);
    EventZAllocationStall event;

    // We can only block if VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);

    {
      //
      // We grab the lock here for two different reasons:
      //
      // 1) Guard deletion of underlying semaphore. This is a workaround for
      // a bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread has returned from sem_wait(). To avoid this race we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      //
      // 2) Guard the list of satisfied pages.
      //
      ZLocker<ZLock> locker(&_lock);
      _satisfied.remove(&request);
    }

    event.commit(type, size);
  }

  return page;
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker<ZLock> locker(&_lock);
  return alloc_page_common(type, size, flags);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  if (!page->is_mapped()) {
    map_page(page);
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

void ZPageAllocator::satisfy_alloc_queue() {
  for (;;) {
    ZPageAllocRequest* const request = _queue.first();
    if (request == NULL) {
      // Allocation queue is empty
      return;
    }

    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
    if (page == NULL) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy request. Note that
    // the dequeue operation must happen first, since the request
    // will immediately be deallocated once it has been satisfied.
    _queue.remove(request);
    _satisfied.insert_first(request);
    request->satisfy(page);
  }
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Set time when last used
  page->set_last_used();

  // Cache page
  _cache.free_page(page);

  // Try satisfy blocked allocations
  satisfy_alloc_queue();
}

size_t ZPageAllocator::flush_cache(ZPageCacheFlushClosure* cl, bool for_allocation) {
  EventZPageCacheFlush event;

  ZList<ZPage> list;

  // Flush pages
  _cache.flush(cl, &list);

  const size_t overflushed = cl->overflushed();
  if (overflushed > 0) {
    // Overflushed, keep part of last page
    ZPage* const page = list.last()->split(overflushed);
    _cache.free_page(page);
  }

  // Destroy pages
  size_t flushed = 0;
  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    flushed += page->size();
    destroy_page(page);
  }

  // Send event
  event.commit(flushed, for_allocation);

  return flushed;
}

class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
public:
  ZPageCacheFlushForAllocationClosure(size_t requested) :
      ZPageCacheFlushClosure(requested) {}

  virtual bool do_page(const ZPage* page) {
    if (_flushed < _requested) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Don't flush page
    return false;
  }
};

void ZPageAllocator::flush_cache_for_allocation(size_t requested) {
  assert(requested <= _cache.available(), "Invalid request");

  // Flush pages
  ZPageCacheFlushForAllocationClosure cl(requested);
  const size_t flushed = flush_cache(&cl, true /* for_allocation */);

  assert(requested == flushed, "Failed to flush");

  const size_t cached_after = _cache.available();
  const size_t cached_before = cached_after + flushed;

  log_info(gc, heap)("Page Cache: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
                     "Flushed: " SIZE_FORMAT "M",
                     cached_before / M, percent_of(cached_before, max_capacity()),
                     cached_after / M, percent_of(cached_after, max_capacity()),
                     flushed / M);

  // Update statistics
  ZStatInc(ZCounterPageCacheFlush, flushed);
}

class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
private:
  const uint64_t _now;
  const uint64_t _delay;
  uint64_t       _timeout;

public:
  ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t delay) :
      ZPageCacheFlushClosure(requested),
      _now(os::elapsedTime()),
      _delay(delay),
      _timeout(_delay) {}

  virtual bool do_page(const ZPage* page) {
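    // Time remaining until this page's uncommit delay expires (zero if already expired)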
    const uint64_t expires = page->last_used() + _delay;
    const uint64_t timeout = expires - MIN2(expires, _now);

    if (_flushed < _requested && timeout == 0) {
      // Flush page
      _flushed += page->size();
      return true;
    }

    // Record shortest non-expired timeout
    _timeout = MIN2(_timeout, timeout);

    // Don't flush page
    return false;
  }

  uint64_t timeout() const {
    return _timeout;
  }
};

uint64_t ZPageAllocator::uncommit(uint64_t delay) {
  // Set the default timeout, when no pages are found in the
  // cache or when uncommit is disabled, equal to the delay.
  uint64_t timeout = delay;

  if (!_uncommit) {
    // Disabled
    return timeout;
  }

  EventZUncommit event;
  size_t capacity_before;
  size_t capacity_after;
  size_t uncommitted;

  {
    SuspendibleThreadSetJoiner joiner;
    ZLocker<ZLock> locker(&_lock);

    // Don't flush more than we will uncommit. Never uncommit
    // the reserve, and never uncommit below min capacity.
    const size_t needed = MIN2(_used + _max_reserve, _current_max_capacity);
    const size_t guarded = MAX2(needed, _min_capacity);
    const size_t uncommittable = _capacity - guarded;
    const size_t uncached_available = _capacity - _used - _cache.available();
    size_t uncommit = MIN2(uncommittable, uncached_available);
    const size_t flush = uncommittable - uncommit;

    if (flush > 0) {
      // Flush pages to uncommit
      ZPageCacheFlushForUncommitClosure cl(flush, delay);
      uncommit += flush_cache(&cl, false /* for_allocation */);
      timeout = cl.timeout();
    }

    // Uncommit
    uncommitted = _physical.uncommit(uncommit);
    _capacity -= uncommitted;

    capacity_after = _capacity;
    capacity_before = capacity_after + uncommitted;
  }

  if (uncommitted > 0) {
    log_info(gc, heap)("Capacity: " SIZE_FORMAT "M(%.0f%%)->" SIZE_FORMAT "M(%.0f%%), "
                       "Uncommitted: " SIZE_FORMAT "M",
                       capacity_before / M, percent_of(capacity_before, max_capacity()),
                       capacity_after / M, percent_of(capacity_after, max_capacity()),
                       uncommitted / M);

    // Send event
    event.commit(capacity_before, capacity_after, uncommitted);

    // Update statistics
    ZStatInc(ZCounterUncommit, uncommitted);
  }

  return timeout;
}

void ZPageAllocator::enable_deferred_delete() const {
  _safe_delete.enable_deferred_delete();
}

void ZPageAllocator::disable_deferred_delete() const {
  _safe_delete.disable_deferred_delete();
}

void ZPageAllocator::debug_map_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_map(page->physical_memory(), page->start());
}

void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _physical.debug_unmap(page->physical_memory(), page->start());
}

void ZPageAllocator::pages_do(ZPageClosure* cl) const {
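  // Visit pages held by satisfied (but not yet removed) allocation requests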
  ZListIterator<ZPageAllocRequest> iter(&_satisfied);
  for (ZPageAllocRequest* request; iter.next(&request);) {
    const ZPage* const page = request->peek();
    if (page != NULL) {
      cl->do_page(page);
    }
  }

  _cache.pages_do(cl);
}

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      request->satisfy(gc_marker);
      return;
    }

    // Out of memory, fail allocation request
    _queue.remove(request);
    _satisfied.insert_first(request);
    request->satisfy(NULL);
  }
}