# HG changeset patch
# User eosterlund
# Date 1498460271 -7200
#      Mon Jun 26 08:57:51 2017 +0200
# Node ID c119d1b129bc7919a03f37217a778411ffc1e456
# Parent  ff6eb4e7a7f0a04876c6e91f2b63d7019c5ecfb4
[mq]: G1LockOrderProblems

diff --git a/src/share/vm/gc/g1/ptrQueue.cpp b/src/share/vm/gc/g1/ptrQueue.cpp
--- a/src/share/vm/gc/g1/ptrQueue.cpp
+++ b/src/share/vm/gc/g1/ptrQueue.cpp
@@ -75,16 +75,7 @@
 
 void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
   assert(_lock->owned_by_self(), "Required.");
-
-  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
-  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
-  // have the same rank and we may get the "possible deadlock" message
-  _lock->unlock();
-
   qset()->enqueue_complete_buffer(node);
-  // We must relock only because the caller will unlock, for the normal
-  // case.
-  _lock->lock_without_safepoint_check();
 }
 
 
@@ -189,34 +180,11 @@
     if (_lock) {
       assert(_lock->owned_by_self(), "Required.");
 
-      // The current PtrQ may be the shared dirty card queue and
-      // may be being manipulated by more than one worker thread
-      // during a pause. Since the enqueueing of the completed
-      // buffer unlocks the Shared_DirtyCardQ_lock more than one
-      // worker thread can 'race' on reading the shared queue attributes
-      // (_buf and _index) and multiple threads can call into this
-      // routine for the same buffer. This will cause the completed
-      // buffer to be added to the CBL multiple times.
-
-      // We "claim" the current buffer by caching value of _buf in
-      // a local and clearing the field while holding _lock. When
-      // _lock is released (while enqueueing the completed buffer)
-      // the thread that acquires _lock will skip this code,
-      // preventing the subsequent the multiple enqueue, and
-      // install a newly allocated buffer below.
-
       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       _buf = NULL;         // clear shared _buf field
 
       locking_enqueue_completed_buffer(node);  // enqueue completed buffer
-
-      // While the current thread was enqueueing the buffer another thread
-      // may have a allocated a new buffer and inserted it into this pointer
-      // queue. If that happens then we just return so that the current
-      // thread doesn't overwrite the buffer allocated by the other thread
-      // and potentially losing some dirtied cards.
-
-      if (_buf != NULL) return;
+      assert(_buf == NULL, "multiple enqueuers appear to be racing");
     } else {
       BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
       if (qset()->process_or_enqueue_complete_buffer(node)) {
diff --git a/src/share/vm/runtime/mutex.hpp b/src/share/vm/runtime/mutex.hpp
--- a/src/share/vm/runtime/mutex.hpp
+++ b/src/share/vm/runtime/mutex.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -93,6 +93,10 @@
   // holding it, i.e., no vm operation can happen, taking other locks, etc.
   // NOTE: It is critical that the rank 'special' be the lowest (earliest)
   // (except for "event"?) for the deadlock detection to work correctly.
+  // The rank access is reserved for locks that may be required to perform
+  // memory accesses that require special GC barriers, such as SATB barriers.
+  // Since memory accesses may be performed pretty much anywhere in the code,
+  // that warrants a rank even more special than the "special" rank.
   // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
   // which being external to the VM are not subject to deadlock detection.
   // The rank safepoint is used only for synchronization in reaching a
@@ -104,14 +108,15 @@
   // at all.
   enum lock_types {
        event,
-       special,
-       suspend_resume,
-       leaf        = suspend_resume +   2,
-       safepoint   = leaf           +  10,
-       barrier     = safepoint      +   1,
-       nonleaf     = barrier        +   1,
-       max_nonleaf = nonleaf        + 900,
-       native      = max_nonleaf    +   1
+       access         = event          +   1,
+       special        = access         +   2,
+       suspend_resume = special        +   1,
+       leaf           = suspend_resume +   2,
+       safepoint      = leaf           +  10,
+       barrier        = safepoint      +   1,
+       nonleaf        = barrier        +   1,
+       max_nonleaf    = nonleaf        + 900,
+       native         = max_nonleaf    +   1
   };
 
   // The WaitSet and EntryList linked lists are composed of ParkEvents.
diff --git a/src/share/vm/runtime/mutexLocker.cpp b/src/share/vm/runtime/mutexLocker.cpp
--- a/src/share/vm/runtime/mutexLocker.cpp
+++ b/src/share/vm/runtime/mutexLocker.cpp
@@ -181,14 +181,13 @@
     def(FullGCCount_lock           , PaddedMonitor, leaf,       true,  Monitor::_safepoint_check_never);      // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {
-    def(SATB_Q_FL_lock             , PaddedMutex  , special   , true,  Monitor::_safepoint_check_never);
-    def(SATB_Q_CBL_mon             , PaddedMonitor, leaf - 1  , true,  Monitor::_safepoint_check_never);
-    def(Shared_SATB_Q_lock         , PaddedMutex  , leaf - 1  , true,  Monitor::_safepoint_check_never);
-
-    def(DirtyCardQ_FL_lock         , PaddedMutex  , special   , true,  Monitor::_safepoint_check_never);
-    def(DirtyCardQ_CBL_mon         , PaddedMonitor, leaf - 1  , true,  Monitor::_safepoint_check_never);
-    def(Shared_DirtyCardQ_lock     , PaddedMutex  , leaf - 1  , true,  Monitor::_safepoint_check_never);
+    def(SATB_Q_FL_lock             , PaddedMutex  , access,     true,  Monitor::_safepoint_check_never);
+    def(SATB_Q_CBL_mon             , PaddedMonitor, access,     true,  Monitor::_safepoint_check_never);
+    def(Shared_SATB_Q_lock         , PaddedMutex  , access + 1, true,  Monitor::_safepoint_check_never);
+    def(DirtyCardQ_FL_lock         , PaddedMutex  , access,     true,  Monitor::_safepoint_check_never);
+    def(DirtyCardQ_CBL_mon         , PaddedMonitor, access,     true,  Monitor::_safepoint_check_never);
+    def(Shared_DirtyCardQ_lock     , PaddedMutex  , access + 1, true,  Monitor::_safepoint_check_never);
 
     def(FreeList_lock              , PaddedMutex  , leaf     , true,  Monitor::_safepoint_check_never);
     def(SecondaryFreeList_lock     , PaddedMonitor, leaf     , true,  Monitor::_safepoint_check_never);
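
For context, the rank change is what lets locking_enqueue_completed_buffer stop dropping and retaking _lock: HotSpot's lock-rank deadlock detection only allows a thread to acquire a lock that ranks below the locks it already holds, and the queue's CBL monitor now sits at rank access while the shared queue lock sits at access + 1. The standalone C++ sketch below is not HotSpot code; RankedMutex, the exact rank check, and the example ranks are illustrative assumptions that mirror the idea only.

// Illustrative sketch of rank-ordered locking (assumption: a thread may only
// acquire a mutex whose rank is strictly lower than every mutex it already
// holds, which is the spirit of HotSpot's lock-rank deadlock detection).
#include <cassert>
#include <mutex>
#include <vector>

class RankedMutex {
 public:
  explicit RankedMutex(int rank) : _rank(rank) {}

  void lock() {
    // Deadlock-avoidance check: every lock already held must out-rank us.
    for (const RankedMutex* held : _held) {
      assert(_rank < held->_rank && "possible deadlock: rank order violated");
    }
    _mutex.lock();
    _held.push_back(this);
  }

  void unlock() {
    assert(!_held.empty() && _held.back() == this);
    _held.pop_back();
    _mutex.unlock();
  }

 private:
  const int _rank;
  std::mutex _mutex;
  // Per-thread stack of locks currently held by this thread.
  static thread_local std::vector<const RankedMutex*> _held;
};

thread_local std::vector<const RankedMutex*> RankedMutex::_held;

// Hypothetical ranks mirroring the patch: the completed-buffer monitor ranks
// below the shared queue lock, so it can be taken while that lock is held.
enum { access_rank = 1 };
RankedMutex cbl_mon(access_rank);            // stands in for DirtyCardQ_CBL_mon
RankedMutex shared_q_lock(access_rank + 1);  // stands in for Shared_DirtyCardQ_lock

int main() {
  shared_q_lock.lock();
  cbl_mon.lock();    // allowed: rank 1 < rank 2, no unlock/relock dance needed
  cbl_mon.unlock();
  shared_q_lock.unlock();
  return 0;
}

Under the old ranking, both locks had the same rank, so taking the CBL monitor while holding the shared queue lock would trip exactly this kind of check, which is why the removed code unlocked and relocked around enqueue_complete_buffer.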