< prev index next >
src/hotspot/share/gc/z/zPageAllocator.cpp
Print this page
@@ -258,19 +258,19 @@
// Tears down a page that has already been detached from the allocator:
// returns its virtual address range to the virtual memory manager (under
// the allocator lock) and then deletes the ZPage object itself.
//
// NOTE(review): the diff changes ZLocker into an explicit template
// instantiation, ZLocker<ZLock>. Presumably ZLocker became a class template
// parameterized on the lock type (e.g. plain vs. reentrant lock) — confirm
// against the corresponding zLock.hpp change in this review.
void ZPageAllocator::destroy_page(ZPage* page) {
assert(page->is_detached(), "Invalid page state");
// Free virtual memory
{
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
_virtual.free(page->virtual_memory());
}
delete page;
}
// Transfers all pages currently on the allocator's internal detached list
// onto the caller-supplied list in a single operation, holding the
// allocator lock for the duration of the transfer. The explicit
// ZLocker<ZLock> spelling matches this review's change of ZLocker into a
// template over the lock type.
void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
list->transfer(&_detached);
}
void ZPageAllocator::flush_cache(size_t size) {
ZList<ZPage> list;
@@ -374,19 +374,19 @@
// the semaphore immediately after returning from sem_wait(). The
// reason is that sem_post() can touch the semaphore after a waiting
// thread have returned from sem_wait(). To avoid this race we are
// forcing the waiting thread to acquire/release the lock held by the
// posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
}
}
return page;
}
// Non-blocking page allocation: takes the allocator lock and delegates to
// the common allocation path. Unlike the blocking variant, the caller is
// never stalled waiting for memory — alloc_page_common's result (which may
// be NULL on failure; presumably, given the non_blocking flag handling in
// alloc_page below — confirm) is returned directly.
ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
return alloc_page_common(type, size, flags);
}
ZPage* ZPageAllocator::alloc_page(uint8_t type, size_t size, ZAllocationFlags flags) {
ZPage* const page = flags.non_blocking()
@@ -475,11 +475,11 @@
// Flip physical mapping
_physical.flip(pmem, vmem.start());
}
void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
// Update used statistics
decrease_used(page->size(), reclaimed);
// Cache page
@@ -493,11 +493,11 @@
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
return !_queue.is_empty();
}
void ZPageAllocator::check_out_of_memory() {
- ZLocker locker(&_lock);
+ ZLocker<ZLock> locker(&_lock);
// Fail allocation requests that were enqueued before the
// last GC cycle started, otherwise start a new GC cycle.
for (ZPageAllocRequest* request = _queue.first(); request != NULL; request = _queue.first()) {
if (request->total_collections() == ZCollectedHeap::heap()->total_collections()) {
< prev index next >