
src/hotspot/share/gc/z/zPageAllocator.cpp

*** 126,135 ****
--- 126,138 ----
      _used_low(0),
      _allocated(0),
      _reclaimed(0),
      _stalled(),
      _satisfied(),
+     _unmap_lock(),
+     _unmap_queue(),
+     _unmap_stop(false),
      _uncommit_lock(),
      _uncommit_enabled(false),
      _uncommit_stop(false),
      _safe_delete(),
      _initialized(false) {
*** 360,375 ****
    _physical.uncommit(page->physical_memory());
  }

  bool ZPageAllocator::map_page(const ZPage* page) const {
    // Map physical memory
!   return _physical.map(page->physical_memory(), page->start());
  }

  void ZPageAllocator::unmap_page(const ZPage* page) const {
    // Unmap physical memory
!   _physical.unmap(page->physical_memory(), page->start());
  }

  void ZPageAllocator::destroy_page(ZPage* page) {
    // Free virtual memory
    _virtual.free(page->virtual_memory());
--- 363,378 ----
    _physical.uncommit(page->physical_memory());
  }

  bool ZPageAllocator::map_page(const ZPage* page) const {
    // Map physical memory
!   return _physical.map(page->start(), page->physical_memory());
  }

  void ZPageAllocator::unmap_page(const ZPage* page) const {
    // Unmap physical memory
!   _physical.unmap(page->start(), page->size());
  }

  void ZPageAllocator::destroy_page(ZPage* page) {
    // Free virtual memory
    _virtual.free(page->virtual_memory());
*** 379,388 ****
--- 382,414 ----

    // Delete page safely
    _safe_delete(page);
  }

+ void ZPageAllocator::enqueue_unmap_page(ZPage* page) {
+   ZLocker<ZConditionLock> locker(&_unmap_lock);
+   _unmap_queue.insert_last(page);
+   _unmap_lock.notify_all();
+ }
+ 
+ ZPage* ZPageAllocator::dequeue_unmap_page() {
+   ZLocker<ZConditionLock> locker(&_unmap_lock);
+ 
+   for (;;) {
+     if (_unmap_stop) {
+       return NULL;
+     }
+ 
+     ZPage* const page = _unmap_queue.remove_first();
+     if (page != NULL) {
+       return page;
+     }
+ 
+     _unmap_lock.wait();
+   }
+ }
+ 
  bool ZPageAllocator::is_alloc_allowed(size_t size, bool no_reserve) const {
    size_t available = _current_max_capacity - _used - _claimed;

    if (no_reserve) {
      // The reserve should not be considered available
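
The two new functions form a small blocking producer/consumer queue guarded by a condition lock, with a stop flag so the consumer can be told to terminate. A minimal standalone sketch of the same pattern, using the C++ standard library instead of HotSpot's ZConditionLock, ZLocker and ZList (all names below are illustrative, not the actual ZGC types):

#include <condition_variable>
#include <deque>
#include <mutex>

// Illustrative stand-in for a ZPage; only pointers to it are used here.
struct Page;

class UnmapQueue {
  std::mutex              _lock;
  std::condition_variable _cv;
  std::deque<Page*>       _queue;
  bool                    _stop = false;

public:
  // Producer side: corresponds to enqueue_unmap_page().
  void enqueue(Page* page) {
    std::lock_guard<std::mutex> guard(_lock);
    _queue.push_back(page);
    _cv.notify_all();
  }

  // Consumer side: corresponds to dequeue_unmap_page().
  // Blocks until a page is available or stop() has been called.
  Page* dequeue() {
    std::unique_lock<std::mutex> guard(_lock);
    for (;;) {
      if (_stop) {
        return nullptr;          // Tell the consumer to terminate
      }
      if (!_queue.empty()) {
        Page* const page = _queue.front();
        _queue.pop_front();
        return page;
      }
      _cv.wait(guard);           // Release the lock while waiting for work
    }
  }

  // Corresponds to unmap_stop(): raise the flag and wake all waiters.
  void stop() {
    std::lock_guard<std::mutex> guard(_lock);
    _stop = true;
    _cv.notify_all();
  }
};

As in the patch, a pending stop takes precedence over draining whatever is still queued, and the notification is issued while holding the lock, matching the ZConditionLock usage above.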
*** 529,554 ****
  ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
    const size_t size = allocation->size();

    // Allocate virtual memory. To make error handling a lot more straight
    // forward, we allocate virtual memory before destroying flushed pages.
    const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
    if (vmem.is_null()) {
      log_error(gc)("Out of address space");
      return NULL;
    }

    ZPhysicalMemory pmem;
    size_t flushed = 0;

!   // Unmap, transfer physical memory, and destroy flushed pages
    ZListRemoveIterator<ZPage> iter(allocation->pages());
    for (ZPage* page; iter.next(&page);) {
      flushed += page->size();
-     unmap_page(page);
      pmem.transfer_segments(page->physical_memory());
!     destroy_page(page);
    }

    if (flushed > 0) {
      // Update statistics
      ZStatInc(ZCounterPageCacheFlush, flushed);
--- 555,581 ----
  ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) {
    const size_t size = allocation->size();

    // Allocate virtual memory. To make error handling a lot more straight
    // forward, we allocate virtual memory before destroying flushed pages.
+   // Flushed pages are also unmapped and destroyed asynchronously, so we
+   // can't immediately reuse that part of the address space anyway.
    const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address());
    if (vmem.is_null()) {
      log_error(gc)("Out of address space");
      return NULL;
    }

    ZPhysicalMemory pmem;
    size_t flushed = 0;

!   // Transfer physical memory, and enqueue pages for unmap and destroy
    ZListRemoveIterator<ZPage> iter(allocation->pages());
    for (ZPage* page; iter.next(&page);) {
      flushed += page->size();
      pmem.transfer_segments(page->physical_memory());
!     enqueue_unmap_page(page);
    }

    if (flushed > 0) {
      // Update statistics
      ZStatInc(ZCounterPageCacheFlush, flushed);
*** 717,726 ****
--- 744,773 ----

    // Try satisfy stalled allocations
    satisfy_stalled();
  }

+ void ZPageAllocator::unmap_run() {
+   for (;;) {
+     ZPage* const page = dequeue_unmap_page();
+     if (page == NULL) {
+       // Stop
+       return;
+     }
+ 
+     // Unmap and destroy page
+     unmap_page(page);
+     destroy_page(page);
+   }
+ }
+ 
+ void ZPageAllocator::unmap_stop() {
+   ZLocker<ZConditionLock> locker(&_unmap_lock);
+   _unmap_stop = true;
+   _unmap_lock.notify_all();
+ }
+ 
  size_t ZPageAllocator::uncommit(uint64_t* timeout) {
    // We need to join the suspendible thread set while manipulating capacity and
    // used, to make sure GC safepoints will have a consistent view. However, when
    // ZVerifyViews is enabled we need to join at a broader scope to also make sure
    // we don't change the address good mask after pages have been flushed, and
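
unmap_run() is the consumer loop, intended to be driven by a dedicated thread: it blocks in dequeue_unmap_page() until work arrives and returns once the stop flag makes the dequeue yield NULL, while unmap_stop() raises the flag and wakes the thread so shutdown cannot hang on an empty queue. Building on the UnmapQueue sketch above (so not independently compilable), the lifecycle could look roughly like this; std::thread and process_page are stand-ins, not how HotSpot actually drives the thread:

#include <thread>

// Hypothetical stand-in for "unmap and destroy the flushed page".
void process_page(Page*) { /* unmap + destroy would happen here */ }

// Mirrors ZPageAllocator::unmap_run(): loop until dequeue() signals stop.
void unmap_run(UnmapQueue& queue) {
  for (;;) {
    Page* const page = queue.dequeue();
    if (page == nullptr) {
      return;                   // Stop was requested
    }
    process_page(page);         // Done off the allocation path
  }
}

void example_lifecycle(UnmapQueue& queue) {
  // A dedicated thread consumes the queue for the lifetime of the process.
  std::thread unmapper([&queue] { unmap_run(queue); });

  // ... allocation slow paths call queue.enqueue(page) as pages are flushed ...

  // Shutdown: raise the stop flag, wake the worker, and wait for it to exit.
  queue.stop();
  unmapper.join();
}

This mirrors what the alloc_page_create change above achieves: the unmap and destroy work for flushed pages no longer runs in the allocation slow path.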
*** 848,876 ****
    _safe_delete.disable_deferred_delete();
  }

  void ZPageAllocator::debug_map_page(const ZPage* page) const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
!   _physical.debug_map(page->physical_memory(), page->start());
  }

  void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
!   _physical.debug_unmap(page->physical_memory(), page->start());
  }

  void ZPageAllocator::pages_do(ZPageClosure* cl) const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

!   ZListIterator<ZPageAllocation> iter(&_satisfied);
!   for (ZPageAllocation* allocation; iter.next(&allocation);) {
!     ZListIterator<ZPage> iter(allocation->pages());
!     for (ZPage* page; iter.next(&page);) {
        cl->do_page(page);
      }
    }

    _cache.pages_do(cl);
  }

  bool ZPageAllocator::is_alloc_stalled() const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
--- 895,928 ----
    _safe_delete.disable_deferred_delete();
  }

  void ZPageAllocator::debug_map_page(const ZPage* page) const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
!   _physical.debug_map(page->start(), page->physical_memory());
  }

  void ZPageAllocator::debug_unmap_page(const ZPage* page) const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
!   _physical.debug_unmap(page->start(), page->size());
  }

  void ZPageAllocator::pages_do(ZPageClosure* cl) const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

!   ZListIterator<ZPageAllocation> iter_satisfied(&_satisfied);
!   for (ZPageAllocation* allocation; iter_satisfied.next(&allocation);) {
!     ZListIterator<ZPage> iter_pages(allocation->pages());
!     for (ZPage* page; iter_pages.next(&page);) {
        cl->do_page(page);
      }
    }

+   ZListIterator<ZPage> iter_unmap_queue(&_unmap_queue);
+   for (ZPage* page; iter_unmap_queue.next(&page);) {
+     cl->do_page(page);
+   }
+ 
    _cache.pages_do(cl);
  }

  bool ZPageAllocator::is_alloc_stalled() const {
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");