/*
 * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zPreMappedMemory.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "runtime/init.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

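// A ZPageAllocRequest represents a page allocation that could not be
// satisfied immediately. The request is queued on the page allocator,
// and the allocating thread blocks on the embedded ZFuture until the
// request is satisfied with a page, fails with NULL (out of memory), or
// is woken up with the gc_marker sentinel to retry after a GC cycle.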
class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;
  const size_t                 _size;
  const ZAllocationFlags       _flags;
  const unsigned int           _total_collections;
  ZListNode<ZPageAllocRequest> _node;
  ZFuture<ZPage*>              _result;

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections) {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPage* wait() {
    return _result.get();
  }

  void satisfy(ZPage* page) {
    _result.set(page);
  }
};

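// Special non-NULL page value used to wake up blocked allocators when a
// new GC cycle has been started, telling them to retry the allocation
// instead of failing with NULL.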
ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;

ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
    _virtual(),
    _physical(max_capacity, ZPageSizeMin),
    _cache(),
    _pre_mapped(_virtual, _physical, min_capacity),
    _max_reserve(max_reserve),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _detached() {}

bool ZPageAllocator::is_initialized() const {
  return _physical.is_initialized() &&
         _virtual.is_initialized() &&
         _pre_mapped.is_initialized();
}

size_t ZPageAllocator::max_capacity() const {
  return _physical.max_capacity();
}

size_t ZPageAllocator::capacity() const {
  return _physical.capacity();
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return _used;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

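// Reset the per-GC-cycle allocation statistics (allocated, reclaimed,
// and the used high/low watermarks). Must be called at a safepoint.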
void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;
  _used += size;
  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  if (reclaimed) {
    // Only pages explicitly released with the reclaimed flag set
    // count as reclaimed bytes. This flag is typically true when
    // a worker releases a page after relocation, and is typically
    // false when we release a page to undo an allocation.
    _reclaimed += size;
  }
  _used -= size;
  if (_used < _used_low) {
    _used_low = _used;
  }
}

size_t ZPageAllocator::available(ZAllocationFlags flags) const {
  size_t available = max_capacity() - used();
  assert(_physical.available() + _pre_mapped.available() + _cache.available() == available, "Should be equal");

  if (flags.no_reserve()) {
    // The memory reserve should not be considered free
    available -= MIN2(available, max_reserve());
  }

  return available;
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  if (pmem.is_null()) {
    // Out of memory
    return NULL;
  }

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    _physical.free(pmem);
    return NULL;
  }

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

void ZPageAllocator::flush_pre_mapped() {
  if (_pre_mapped.available() == 0) {
    return;
  }

  // Detach the memory mapping.
  detach_memory(_pre_mapped.virtual_memory(), _pre_mapped.physical_memory());

  _pre_mapped.clear();
}

void ZPageAllocator::map_page(ZPage* page) {
  // Map physical memory
  _physical.map(page->physical_memory(), page->start());
}

void ZPageAllocator::detach_page(ZPage* page) {
  // Detach the memory mapping.
  detach_memory(page->virtual_memory(), page->physical_memory());

  // Add to list of detached pages
  _detached.insert_last(page);
}

void ZPageAllocator::destroy_page(ZPage* page) {
  assert(page->is_detached(), "Invalid page state");

  // Free virtual memory
  {
    ZLocker locker(&_lock);
    _virtual.free(page->virtual_memory());
  }

  delete page;
}

void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
  ZLocker locker(&_lock);
  list->transfer(&_detached);
}

void ZPageAllocator::flush_cache(size_t size) {
  ZList<ZPage> list;

  _cache.flush(&list, size);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    detach_page(page);
  }
}

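// A blocking allocation cannot wait for a GC cycle while the VM is still
// initializing, so treat an allocation failure during initialization as
// a fatal "Java heap too small" error.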
void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}

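// Allocate from the cheapest source first: reuse a page from the page
// cache, then carve a page out of the pre-mapped memory, and only as a
// last resort create a new page, flushing remaining pre-mapped memory
// and the page cache as needed to free up physical memory.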
ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
  const size_t available_total = available(flags);
  if (available_total < size) {
    // Not enough free memory
    return NULL;
  }

  // Try allocating from the page cache
  ZPage* const cached_page = _cache.alloc_page(type, size);
  if (cached_page != NULL) {
    return cached_page;
  }

  // Try allocating from the pre-mapped memory
  ZPage* const pre_mapped_page = _pre_mapped.alloc_page(type, size);
  if (pre_mapped_page != NULL) {
    return pre_mapped_page;
  }

  // Flush any remaining pre-mapped memory so that
  // subsequent allocations can use the physical memory.
  flush_pre_mapped();

  // Check if physical memory is available
  const size_t available_physical = _physical.available();
  if (available_physical < size) {
    // Flush cache to free up more physical memory
    flush_cache(size - available_physical);
  }

  // Create new page and allocate physical memory
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, used(), available(flags), _cache.available(), flags);

  return page;
}

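// A blocking allocation first tries the common (non-blocking) path while
// holding the lock. If that fails, the request is enqueued and the thread
// blocks on the request's future, triggering asynchronous GC cycles until
// the request is satisfied with a page or fails with NULL.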
ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);

    // We can only block if VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);
  }

  return page;
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker locker(&_lock);
  return alloc_page_common(type, size, flags);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  if (!page->is_mapped()) {
    map_page(page);
  }

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

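// Satisfy enqueued allocation requests in FIFO order for as long as
// memory is available. Called with the allocator lock held, see free_page().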
void ZPageAllocator::satisfy_alloc_queue() {
  for (;;) {
    ZPageAllocRequest* const request = _queue.first();
    if (request == NULL) {
      // Allocation queue is empty
      return;
    }

    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
    if (page == NULL) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy request. Note that
    // the dequeue operation must happen first, since the request
    // will immediately be deallocated once it has been satisfied.
    _queue.remove(request);
    request->satisfy(page);
  }
}

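// Unmap and free the physical memory backing a virtual memory range.
// The virtual memory itself is not freed here; for detached pages that
// happens later, in destroy_page().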
void ZPageAllocator::detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory& pmem) {
  const uintptr_t addr = vmem.start();

  // Unmap physical memory
  _physical.unmap(pmem, addr);

  // Free physical memory
  _physical.free(pmem);

  // Clear physical mapping
  pmem.clear();
}

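// The heap is multi-mapped and the active address view changes at the
// start of each GC phase. Flipping lets the physical memory manager
// update its mappings to match the new view.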
void ZPageAllocator::flip_page(ZPage* page) {
  const ZPhysicalMemory& pmem = page->physical_memory();
  const uintptr_t addr = page->start();

  // Flip physical mapping
  _physical.flip(pmem, addr);
}

void ZPageAllocator::flip_pre_mapped() {
  if (_pre_mapped.available() == 0) {
    // Nothing to flip
    return;
  }

  const ZPhysicalMemory& pmem = _pre_mapped.physical_memory();
  const ZVirtualMemory& vmem = _pre_mapped.virtual_memory();

  // Flip physical mapping
  _physical.flip(pmem, vmem.start());
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Cache page
  _cache.free_page(page);

  // Try to satisfy blocked allocations
  satisfy_alloc_queue();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker locker(&_lock);

  ZPageAllocRequest* const first = _queue.first();
  if (first == NULL) {
    // Allocation queue is empty
    return;
  }

  // Fail the allocation request if it was enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  if (first->total_collections() < ZCollectedHeap::heap()->total_collections()) {
    // Out of memory, fail all enqueued requests
    for (ZPageAllocRequest* request = _queue.remove_first(); request != NULL; request = _queue.remove_first()) {
      request->satisfy(NULL);
    }
  } else {
    // Start another GC cycle, keep all enqueued requests
    first->satisfy(gc_marker);
  }
}