
src/hotspot/share/gc/z/zPageAllocator.cpp

*** 258,276 ****
  void ZPageAllocator::destroy_page(ZPage* page) {
    assert(page->is_detached(), "Invalid page state");

    // Free virtual memory
    {
!     ZLocker locker(&_lock);
      _virtual.free(page->virtual_memory());
    }

    delete page;
  }

  void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
!   ZLocker locker(&_lock);
    list->transfer(&_detached);
  }

  void ZPageAllocator::flush_cache(size_t size) {
    ZList<ZPage> list;
--- 258,276 ----
  void ZPageAllocator::destroy_page(ZPage* page) {
    assert(page->is_detached(), "Invalid page state");

    // Free virtual memory
    {
!     ZLocker<ZLock> locker(&_lock);
      _virtual.free(page->virtual_memory());
    }

    delete page;
  }

  void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
!   ZLocker<ZLock> locker(&_lock);
    list->transfer(&_detached);
  }

  void ZPageAllocator::flush_cache(size_t size) {
    ZList<ZPage> list;
*** 374,392 ****
        // the semaphore immediately after returning from sem_wait(). The
        // reason is that sem_post() can touch the semaphore after a waiting
        // thread have returned from sem_wait(). To avoid this race we are
        // forcing the waiting thread to acquire/release the lock held by the
        // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
!       ZLocker locker(&_lock);
      }
    }

    return page;
  }

  ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
!   ZLocker locker(&_lock);
    return alloc_page_common(type, size, flags);
  }

  ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
    ZPage* const page = flags.non_blocking()
--- 374,392 ----
        // the semaphore immediately after returning from sem_wait(). The
        // reason is that sem_post() can touch the semaphore after a waiting
        // thread have returned from sem_wait(). To avoid this race we are
        // forcing the waiting thread to acquire/release the lock held by the
        // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
!       ZLocker<ZLock> locker(&_lock);
      }
    }

    return page;
  }

  ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
!   ZLocker<ZLock> locker(&_lock);
    return alloc_page_common(type, size, flags);
  }

  ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
    ZPage* const page = flags.non_blocking()
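Note on the hunk above: the in-code comment refers to a glibc issue where sem_post() may still touch a semaphore's memory after the waiter has been released, so the waiter must not destroy the semaphore immediately after sem_wait() returns. The following is a minimal sketch of that acquire/release workaround in plain POSIX terms; the names (Request, satisfy, wait_for, g_lock) are illustrative and are not the actual ZPageAllocRequest API.

// Sketch only: illustrates the sem_post()/sem_wait() workaround described in
// the comment above. Names are illustrative, not part of the ZGC sources.
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

struct Request {
  sem_t sem;

  Request()  { sem_init(&sem, 0 /* not shared */, 0 /* initial value */); }
  ~Request() { sem_destroy(&sem); }
};

// Posting thread: holds the lock across sem_post(), so the semaphore cannot
// be destroyed while sem_post() may still be touching its memory
// (https://sourceware.org/bugzilla/show_bug.cgi?id=12674).
void satisfy(Request* request) {
  pthread_mutex_lock(&g_lock);
  sem_post(&request->sem);
  pthread_mutex_unlock(&g_lock);
}

// Waiting thread: after sem_wait() returns, acquire and release the same lock
// before the Request (and its semaphore) is destroyed, which forces this
// thread to wait until the poster has left sem_post().
void wait_for(Request* request) {
  while (sem_wait(&request->sem) == -1 && errno == EINTR) {
    // Retry if interrupted by a signal
  }
  pthread_mutex_lock(&g_lock);
  pthread_mutex_unlock(&g_lock);
  // The caller may now safely destroy *request.
}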
*** 475,485 ****
    // Flip physical mapping
    _physical.flip(pmem, vmem.start());
  }

  void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
!   ZLocker locker(&_lock);

    // Update used statistics
    decrease_used(page->size(), reclaimed);

    // Cache page
--- 475,485 ----
    // Flip physical mapping
    _physical.flip(pmem, vmem.start());
  }

  void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
!   ZLocker<ZLock> locker(&_lock);

    // Update used statistics
    decrease_used(page->size(), reclaimed);

    // Cache page
*** 493,503 ****
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
    return !_queue.is_empty();
  }

  void ZPageAllocator::check_out_of_memory() {
!   ZLocker locker(&_lock);

    // Fail allocation requests that were enqueued before the
    // last GC cycle started, otherwise start a new GC cycle.
    for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
      if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
--- 493,503 ----
    assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
    return !_queue.is_empty();
  }

  void ZPageAllocator::check_out_of_memory() {
!   ZLocker<ZLock> locker(&_lock);

    // Fail allocation requests that were enqueued before the
    // last GC cycle started, otherwise start a new GC cycle.
    for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
      if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
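Every change in this file replaces ZLocker with ZLocker<ZLock>, i.e. the RAII guard is now a class template parameterized on the lock type, so each use site names the lock explicitly. Below is a minimal sketch of such a guard, assuming a ZLock with lock()/unlock() built on pthread_mutex_t; the real ZLock/ZLocker are declared in zLock.hpp and may differ in detail.

// Sketch only: a lock-type-parameterized RAII guard in the spirit of the
// ZLocker<ZLock> instantiations above, not the actual zLock.hpp code.
#include <pthread.h>

class ZLock {
private:
  pthread_mutex_t _mutex;

public:
  ZLock()       { pthread_mutex_init(&_mutex, NULL); }
  ~ZLock()      { pthread_mutex_destroy(&_mutex); }
  void lock()   { pthread_mutex_lock(&_mutex); }
  void unlock() { pthread_mutex_unlock(&_mutex); }
};

// Because ZLocker is a template, the same guard works with any lock type that
// provides lock()/unlock(); the trade-off is that call sites must spell out
// the lock type, as in: ZLocker<ZLock> locker(&_lock);
template <typename T>
class ZLocker {
private:
  T* const _lock;

public:
  explicit ZLocker(T* lock) : _lock(lock) { _lock->lock(); }
  ~ZLocker()                              { _lock->unlock(); }
};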