/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zFuture.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zPageCache.inline.hpp"
#include "gc/z/zPreMappedMemory.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "runtime/init.hpp"

static const ZStatCounter       ZCounterAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond);
static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall");

class ZPageAllocRequest : public StackObj {
  friend class ZList<ZPageAllocRequest>;

private:
  const uint8_t                _type;
  const size_t                 _size;
  const ZAllocationFlags       _flags;
  const unsigned int           _total_collections;
  ZListNode<ZPageAllocRequest> _node;
  ZFuture<ZPage*>              _result;

public:
  ZPageAllocRequest(uint8_t type, size_t size, ZAllocationFlags flags, unsigned int total_collections) :
      _type(type),
      _size(size),
      _flags(flags),
      _total_collections(total_collections) {}

  uint8_t type() const {
    return _type;
  }

  size_t size() const {
    return _size;
  }

  ZAllocationFlags flags() const {
    return _flags;
  }

  unsigned int total_collections() const {
    return _total_collections;
  }

  ZPage* wait() {
    return _result.get();
  }

  void satisfy(ZPage* page) {
    _result.set(page);
  }
};

ZPage* const ZPageAllocator::gc_marker = (ZPage*)-1;
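
// Usage sketch (illustrative only): a stalled allocation parks on the
// ZFuture embedded in its ZPageAllocRequest, while another thread later
// satisfies the request. The gc_marker sentinel above is not a real page;
// it means "a GC cycle was started, wait again":
//
//   ZPageAllocRequest request(type, size, flags, total_collections);
//   _queue.insert_last(&request);   // enqueued while holding _lock
//   ZPage* page = request.wait();   // blocks in ZFuture::get()
//   if (page == gc_marker) {
//     // Not a real page; the request remains enqueued, wait again
//   }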

ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t max_capacity, size_t max_reserve) :
    _lock(),
    _virtual(),
    _physical(max_capacity),
    _cache(),
    _max_reserve(max_reserve),
    _pre_mapped(_virtual, _physical, try_ensure_unused_for_pre_mapped(min_capacity)),
    _used_high(0),
    _used_low(0),
    _used(0),
    _allocated(0),
    _reclaimed(0),
    _queue(),
    _detached() {}

bool ZPageAllocator::is_initialized() const {
  return _physical.is_initialized() &&
         _virtual.is_initialized() &&
         _pre_mapped.is_initialized();
}

size_t ZPageAllocator::max_capacity() const {
  return _physical.max_capacity();
}

size_t ZPageAllocator::current_max_capacity() const {
  return _physical.current_max_capacity();
}

size_t ZPageAllocator::capacity() const {
  return _physical.capacity();
}

size_t ZPageAllocator::max_reserve() const {
  return _max_reserve;
}

size_t ZPageAllocator::used_high() const {
  return _used_high;
}

size_t ZPageAllocator::used_low() const {
  return _used_low;
}

size_t ZPageAllocator::used() const {
  return _used;
}

size_t ZPageAllocator::allocated() const {
  return _allocated;
}

size_t ZPageAllocator::reclaimed() const {
  return _reclaimed > 0 ? (size_t)_reclaimed : 0;
}

void ZPageAllocator::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _allocated = 0;
  _reclaimed = 0;
  _used_high = _used_low = _used;
}

void ZPageAllocator::increase_used(size_t size, bool relocation) {
  if (relocation) {
    // Allocating a page for the purpose of relocation has a
    // negative contribution to the number of reclaimed bytes.
    _reclaimed -= size;
  }
  _allocated += size;
  _used += size;
  if (_used > _used_high) {
    _used_high = _used;
  }
}

void ZPageAllocator::decrease_used(size_t size, bool reclaimed) {
  if (reclaimed) {
    // Only pages explicitly released with the reclaimed flag set
    // count as reclaimed bytes. This flag is typically true when
    // a worker releases a page after relocation, and is typically
    // false when we release a page to undo an allocation.
    _reclaimed += size;
  }
  _used -= size;
  if (_used < _used_low) {
    _used_low = _used;
  }
}

size_t ZPageAllocator::max_available(bool no_reserve) const {
  size_t available = current_max_capacity() - used();

  if (no_reserve) {
    // The reserve should not be considered available
    available -= MIN2(available, max_reserve());
  }

  return available;
}

size_t ZPageAllocator::try_ensure_unused(size_t size, bool no_reserve) {
  // Ensure that we always have space available for the reserve. This
  // is needed to avoid losing the reserve because of failure to map
  // more memory before reaching max capacity.
  _physical.try_ensure_unused_capacity(size + max_reserve());

  size_t unused = _physical.unused_capacity();

  if (no_reserve) {
    // The reserve should not be considered unused
    unused -= MIN2(unused, max_reserve());
  }

  return MIN2(size, unused);
}

size_t ZPageAllocator::try_ensure_unused_for_pre_mapped(size_t size) {
  // This function is called during construction, where the
  // physical memory manager might have failed to initialize.
  if (!_physical.is_initialized()) {
    return 0;
  }

  return try_ensure_unused(size, true /* no_reserve */);
}

ZPage* ZPageAllocator::create_page(uint8_t type, size_t size) {
  // Allocate physical memory
  const ZPhysicalMemory pmem = _physical.alloc(size);
  if (pmem.is_null()) {
    // Out of memory
    return NULL;
  }

  // Allocate virtual memory
  const ZVirtualMemory vmem = _virtual.alloc(size);
  if (vmem.is_null()) {
    // Out of address space
    _physical.free(pmem);
    return NULL;
  }

  // Allocate page
  return new ZPage(type, vmem, pmem);
}

void ZPageAllocator::flush_pre_mapped() {
  if (_pre_mapped.available() == 0) {
    return;
  }

  // Detach the memory mapping
  detach_memory(_pre_mapped.virtual_memory(), _pre_mapped.physical_memory());

  _pre_mapped.clear();
}

void ZPageAllocator::detach_page(ZPage* page) {
  // Detach the memory mapping
  detach_memory(page->virtual_memory(), page->physical_memory());

  // Add to list of detached pages
  _detached.insert_last(page);
}

void ZPageAllocator::destroy_page(ZPage* page) {
  assert(page->is_detached(), "Invalid page state");

  // Free virtual memory
  {
    ZLocker<ZLock> locker(&_lock);
    _virtual.free(page->virtual_memory());
  }

  delete page;
}

void ZPageAllocator::map_page(ZPage* page) {
  // Map physical memory
  if (!page->is_mapped()) {
    _physical.map(page->physical_memory(), page->start());
  } else if (ZVerifyViews) {
    _physical.debug_map(page->physical_memory(), page->start());
  }
}

void ZPageAllocator::unmap_all_pages() {
  ZPhysicalMemory pmem(ZPhysicalMemorySegment(0 /* start */, ZAddressOffsetMax));
  _physical.debug_unmap(pmem, 0 /* offset */);
  pmem.clear();
}

void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
  ZLocker<ZLock> locker(&_lock);
  list->transfer(&_detached);
}

void ZPageAllocator::flush_cache(size_t size) {
  ZList<ZPage> list;

  _cache.flush(&list, size);

  for (ZPage* page = list.remove_first(); page != NULL; page = list.remove_first()) {
    detach_page(page);
  }
}

void ZPageAllocator::check_out_of_memory_during_initialization() {
  if (!is_init_completed()) {
    vm_exit_during_initialization("java.lang.OutOfMemoryError", "Java heap too small");
  }
}
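
// Allocation-order sketch (illustrative only): alloc_page_common_inner()
// below falls back through progressively more expensive sources before
// giving up:
//
//   1. _cache.alloc_page(type, size)       // reuse a cached, already mapped page
//   2. _pre_mapped.alloc_page(type, size)  // carve a page out of pre-mapped memory
//   3. flush_cache(size - unused)          // evict cached pages to free up
//                                          // physical memory, then ...
//   4. create_page(type, size)             // ... back a brand-new page with it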

ZPage* ZPageAllocator::alloc_page_common_inner(uint8_t type, size_t size, ZAllocationFlags flags) {
  const size_t max = max_available(flags.no_reserve());
  if (max < size) {
    // Not enough free memory
    return NULL;
  }

  // Try allocating from the page cache
  ZPage* const cached_page = _cache.alloc_page(type, size);
  if (cached_page != NULL) {
    return cached_page;
  }

  // Try allocating from the pre-mapped memory
  ZPage* const pre_mapped_page = _pre_mapped.alloc_page(type, size);
  if (pre_mapped_page != NULL) {
    return pre_mapped_page;
  }

  // Flush any remaining pre-mapped memory so that
  // subsequent allocations can use the physical memory.
  flush_pre_mapped();

  // Try to ensure that physical memory is available
  const size_t unused = try_ensure_unused(size, flags.no_reserve());
  if (unused < size) {
    // Flush the cache to free up more physical memory
    flush_cache(size - unused);
  }

  // Create a new page and allocate physical memory
  return create_page(type, size);
}

ZPage* ZPageAllocator::alloc_page_common(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = alloc_page_common_inner(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Update used statistics
  increase_used(size, flags.relocation());

  // Send trace event
  ZTracer::tracer()->report_page_alloc(size, used(), max_available(flags.no_reserve()), _cache.available(), flags);

  return page;
}
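
// Dispatch sketch (illustrative only): callers select the blocking or the
// non-blocking path below through the allocation flags. For example, a
// fail-fast allocation would return NULL instead of stalling the thread
// (set_non_blocking() is an assumed ZAllocationFlags setter, shown here
// purely for illustration):
//
//   ZAllocationFlags flags;
//   flags.set_non_blocking();                       // assumed setter
//   ZPage* const page = alloc_page(type, size, flags);   // NULL on failure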

ZPage* ZPageAllocator::alloc_page_blocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  // Prepare to block
  ZPageAllocRequest request(type, size, flags, ZCollectedHeap::heap()->total_collections());

  _lock.lock();

  // Try non-blocking allocation
  ZPage* page = alloc_page_common(type, size, flags);
  if (page == NULL) {
    // Allocation failed, enqueue request
    _queue.insert_last(&request);
  }

  _lock.unlock();

  if (page == NULL) {
    // Allocation failed
    ZStatTimer timer(ZCriticalPhaseAllocationStall);

    // We can only block if the VM is fully initialized
    check_out_of_memory_during_initialization();

    do {
      // Start asynchronous GC
      ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);

      // Wait for allocation to complete or fail
      page = request.wait();
    } while (page == gc_marker);

    {
      // Guard deletion of underlying semaphore. This is a workaround for a
      // bug in sem_post() in glibc < 2.21, where it's not safe to destroy
      // the semaphore immediately after returning from sem_wait(). The
      // reason is that sem_post() can touch the semaphore after a waiting
      // thread has returned from sem_wait(). To avoid this race, we are
      // forcing the waiting thread to acquire/release the lock held by the
      // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
      ZLocker<ZLock> locker(&_lock);
    }
  }

  return page;
}

ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZLocker<ZLock> locker(&_lock);
  return alloc_page_common(type, size, flags);
}

ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
  ZPage* const page = flags.non_blocking()
                      ? alloc_page_nonblocking(type, size, flags)
                      : alloc_page_blocking(type, size, flags);
  if (page == NULL) {
    // Out of memory
    return NULL;
  }

  // Map page if needed
  map_page(page);

  // Reset page. This updates the page's sequence number and must
  // be done after page allocation, which potentially blocked in
  // a safepoint where the global sequence number was updated.
  page->reset();

  // Update allocation statistics. Exclude worker threads to avoid
  // artificial inflation of the allocation rate due to relocation.
  if (!flags.worker_thread()) {
    // Note that there are two allocation rate counters, which have
    // different purposes and are sampled at different frequencies.
    const size_t bytes = page->size();
    ZStatInc(ZCounterAllocationRate, bytes);
    ZStatInc(ZStatAllocRate::counter(), bytes);
  }

  return page;
}

void ZPageAllocator::satisfy_alloc_queue() {
  for (;;) {
    ZPageAllocRequest* const request = _queue.first();
    if (request == NULL) {
      // Allocation queue is empty
      return;
    }

    ZPage* const page = alloc_page_common(request->type(), request->size(), request->flags());
    if (page == NULL) {
      // Allocation could not be satisfied, give up
      return;
    }

    // Allocation succeeded, dequeue and satisfy request. Note that
    // the dequeue operation must happen first, since the request
    // will immediately be deallocated once it has been satisfied.
    _queue.remove(request);
    request->satisfy(page);
  }
}

void ZPageAllocator::detach_memory(const ZVirtualMemory& vmem, ZPhysicalMemory& pmem) {
  const uintptr_t addr = vmem.start();

  // Unmap physical memory
  _physical.unmap(pmem, addr);

  // Free physical memory
  _physical.free(pmem);

  // Clear physical mapping
  pmem.clear();
}

void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
  ZLocker<ZLock> locker(&_lock);

  // Update used statistics
  decrease_used(page->size(), reclaimed);

  // Cache page
  _cache.free_page(page);

  // Try to satisfy blocked allocations
  satisfy_alloc_queue();
}
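
// Retry-protocol sketch (illustrative only): check_out_of_memory() below
// decides per queued request whether to keep it waiting or to fail it,
// based on whether a GC cycle has run since the request was enqueued:
//
//   if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
//     request->satisfy(gc_marker);   // no GC since enqueue; start one, keep waiting
//   } else {
//     request->satisfy(NULL);        // a GC ran and memory is still short; OOM
//   }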

bool ZPageAllocator::is_alloc_stalled() const {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  return !_queue.is_empty();
}

void ZPageAllocator::check_out_of_memory() {
  ZLocker<ZLock> locker(&_lock);

  // Fail allocation requests that were enqueued before the
  // last GC cycle started, otherwise start a new GC cycle.
  for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
    if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
      // Start a new GC cycle, keep allocation requests enqueued
      request->satisfy(gc_marker);
      return;
    }

    // Out of memory, fail allocation request
    _queue.remove_first();
    request->satisfy(NULL);
  }
}