--- old/src/hotspot/share/gc/z/zLock.hpp	2018-11-20 08:42:03.370881372 +0100
+++ new/src/hotspot/share/gc/z/zLock.hpp	2018-11-20 08:42:03.078868862 +0100
@@ -33,18 +33,35 @@
 public:
   ZLock();
+  ~ZLock();
 
   void lock();
   bool try_lock();
   void unlock();
 };
 
+class ZReentrantLock {
+private:
+  ZLock            _lock;
+  Thread* volatile _owner;
+  uint64_t         _count;
+
+public:
+  ZReentrantLock();
+
+  void lock();
+  void unlock();
+
+  bool is_owned() const;
+};
+
+template <typename T>
 class ZLocker : public StackObj {
 private:
-  ZLock* const _lock;
+  T* const _lock;
 
 public:
-  ZLocker(ZLock* lock);
+  ZLocker(T* lock);
   ~ZLocker();
 };
--- old/src/hotspot/share/gc/z/zLock.inline.hpp	2018-11-20 08:42:03.762898165 +0100
+++ new/src/hotspot/share/gc/z/zLock.inline.hpp	2018-11-20 08:42:03.479886041 +0100
@@ -25,11 +25,18 @@
 #define SHARE_GC_Z_ZLOCK_INLINE_HPP
 
 #include "gc/z/zLock.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/debug.hpp"
 
 inline ZLock::ZLock() {
   pthread_mutex_init(&_lock, NULL);
 }
 
+inline ZLock::~ZLock() {
+  pthread_mutex_destroy(&_lock);
+}
+
 inline void ZLock::lock() {
   pthread_mutex_lock(&_lock);
 }
 
@@ -42,12 +49,49 @@
   pthread_mutex_unlock(&_lock);
 }
 
-inline ZLocker::ZLocker(ZLock* lock) :
+inline ZReentrantLock::ZReentrantLock() :
+    _lock(),
+    _owner(NULL),
+    _count(0) {}
+
+inline void ZReentrantLock::lock() {
+  Thread* const thread = Thread::current();
+  Thread* const owner = Atomic::load(&_owner);
+
+  if (owner != thread) {
+    _lock.lock();
+    Atomic::store(thread, &_owner);
+  }
+
+  _count++;
+}
+
+inline void ZReentrantLock::unlock() {
+  assert(is_owned(), "Invalid owner");
+  assert(_count > 0, "Invalid count");
+
+  _count--;
+
+  if (_count == 0) {
+    Atomic::store((Thread*)NULL, &_owner);
+    _lock.unlock();
+  }
+}
+
+inline bool ZReentrantLock::is_owned() const {
+  Thread* const thread = Thread::current();
+  Thread* const owner = Atomic::load(&_owner);
+  return owner == thread;
+}
+
+template <typename T>
+inline ZLocker<T>::ZLocker(T* lock) :
     _lock(lock) {
   _lock->lock();
 }
 
-inline ZLocker::~ZLocker() {
+template <typename T>
+inline ZLocker<T>::~ZLocker() {
   _lock->unlock();
 }
--- old/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	2018-11-20 08:42:04.192916587 +0100
+++ new/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	2018-11-20 08:42:03.859902321 +0100
@@ -82,7 +82,7 @@
 }
 
 uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) {
-  ZLocker locker(&_expand_lock);
+  ZLocker<ZLock> locker(&_expand_lock);
 
   // Retry allocation before expanding
   uintptr_t addr = alloc_space(size);
--- old/src/hotspot/share/gc/z/zPageAllocator.cpp	2018-11-20 08:42:04.623935051 +0100
+++ new/src/hotspot/share/gc/z/zPageAllocator.cpp	2018-11-20 08:42:04.290920785 +0100
@@ -260,7 +260,7 @@
 
   // Free virtual memory
   {
-    ZLocker locker(&_lock);
+    ZLocker<ZLock> locker(&_lock);
     _virtual.free(page->virtual_memory());
   }
 
@@ -268,7 +268,7 @@
 }
 
 void ZPageAllocator::flush_detached_pages(ZList<ZPage>* list) {
-  ZLocker locker(&_lock);
+  ZLocker<ZLock> locker(&_lock);
   list->transfer(&_detached);
 }
 
@@ -376,7 +376,7 @@
     // thread have returned from sem_wait(). To avoid this race we are
     // forcing the waiting thread to acquire/release the lock held by the
     // posting thread. https://sourceware.org/bugzilla/show_bug.cgi?id=12674
-    ZLocker locker(&_lock);
+    ZLocker<ZLock> locker(&_lock);
   }
 }
 
@@ -384,7 +384,7 @@
 }
 
 ZPage* ZPageAllocator::alloc_page_nonblocking(uint8_t type, size_t size, ZAllocationFlags flags) {
-  ZLocker locker(&_lock);
+  ZLocker<ZLock> locker(&_lock);
   return alloc_page_common(type, size, flags);
 }
 
@@ -477,7 +477,7 @@
 }
 
 void ZPageAllocator::free_page(ZPage* page, bool reclaimed) {
-  ZLocker locker(&_lock);
+  ZLocker<ZLock> locker(&_lock);
 
   // Update used statistics
   decrease_used(page->size(), reclaimed);
 
@@ -495,7 +495,7 @@
 }
 
 void ZPageAllocator::check_out_of_memory() {
-  ZLocker locker(&_lock);
+  ZLocker<ZLock> locker(&_lock);
 
   // Fail allocation requests that were enqueued before the
   // last GC cycle started, otherwise start a new GC cycle.
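
Below is a minimal usage sketch (not part of the patch) of the reentrancy the new ZReentrantLock provides once ZLocker is templated on the lock type: the same thread may acquire the lock recursively, and the underlying ZLock is released only when the outermost unlock() runs. The example_lock, outer() and inner() names are hypothetical and exist purely for illustration.

    static ZReentrantLock example_lock;

    void inner() {
      // Re-entry on the owning thread only bumps the recursion count,
      // so this does not self-deadlock.
      ZLocker<ZReentrantLock> locker(&example_lock);
      assert(example_lock.is_owned(), "Must be owned by the current thread");
    }

    void outer() {
      // First acquisition takes the underlying ZLock and records the owner.
      ZLocker<ZReentrantLock> locker(&example_lock);
      inner();
    } // Outermost unlock() clears the owner and releases the ZLock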