< prev index next >

src/hotspot/share/runtime/mutexLocker.cpp

Print this page
rev 49260 : [mq]: 8198691.patch


 120 Mutex*   OldSets_lock                 = NULL;
 121 Monitor* RootRegionScan_lock          = NULL;
 122 
 123 Monitor* GCTaskManager_lock           = NULL;
 124 
 125 Mutex*   Management_lock              = NULL;
 126 Monitor* Service_lock                 = NULL;
 127 Monitor* PeriodicTask_lock            = NULL;
 128 Monitor* RedefineClasses_lock         = NULL;
 129 
 130 #if INCLUDE_TRACE
 131 Mutex*   JfrStacktrace_lock           = NULL;
 132 Monitor* JfrMsg_lock                  = NULL;
 133 Mutex*   JfrBuffer_lock               = NULL;
 134 Mutex*   JfrStream_lock               = NULL;
 135 #endif
 136 
 137 #ifndef SUPPORTS_NATIVE_CX8
 138 Mutex*   UnsafeJlong_lock             = NULL;
 139 #endif

 140 
 141 #define MAX_NUM_MUTEX 128
 142 static Monitor * _mutex_array[MAX_NUM_MUTEX];
 143 static int _num_mutex;
 144 
 145 #ifdef ASSERT
 146 void assert_locked_or_safepoint(const Monitor * lock) {
 147   // check if this thread owns the lock (common case)
 148   if (IgnoreLockingAssertions) return;
 149   assert(lock != NULL, "Need non-NULL lock");
 150   if (lock->owned_by_self()) return;
 151   if (SafepointSynchronize::is_at_safepoint()) return;
 152   if (!Universe::is_fully_initialized()) return;
 153   // see if invoker of VM operation owns it
 154   VM_Operation* op = VMThread::vm_operation();
 155   if (op != NULL && op->calling_thread() == lock->owner()) return;
 156   fatal("must own lock %s", lock->name());
 157 }
 158 
 159 // a stronger assertion than the above


 280   def(MethodCompileQueue_lock      , PaddedMonitor, nonleaf+4,   true,  Monitor::_safepoint_check_always);
 281   def(Debug2_lock                  , PaddedMutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
 282   def(Debug3_lock                  , PaddedMutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
 283   def(CompileThread_lock           , PaddedMonitor, nonleaf+5,   false, Monitor::_safepoint_check_always);
 284   def(PeriodicTask_lock            , PaddedMonitor, nonleaf+5,   true,  Monitor::_safepoint_check_sometimes);
 285   def(RedefineClasses_lock         , PaddedMonitor, nonleaf+5,   true,  Monitor::_safepoint_check_always);
 286   if (WhiteBoxAPI) {
 287     def(Compilation_lock           , PaddedMonitor, leaf,        false, Monitor::_safepoint_check_never);
 288   }
 289 
 290 #if INCLUDE_TRACE
 291   def(JfrMsg_lock                  , PaddedMonitor, leaf,        true,  Monitor::_safepoint_check_always);
 292   def(JfrBuffer_lock               , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 293   def(JfrStream_lock               , PaddedMutex  , leaf+1,      true,  Monitor::_safepoint_check_never);      // ensure to rank lower than 'safepoint'
 294   def(JfrStacktrace_lock           , PaddedMutex  , special,     true,  Monitor::_safepoint_check_sometimes);
 295 #endif
 296 
 297 #ifndef SUPPORTS_NATIVE_CX8
 298   def(UnsafeJlong_lock             , PaddedMutex  , special,     false, Monitor::_safepoint_check_never);
 299 #endif


 300 }
 301 
 302 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
 303   if (SafepointSynchronize::is_at_safepoint()) {
 304     _locked = false;
 305   } else {
 306     _mutex = mutex;
 307     _locked = true;
 308     _mutex->lock();
 309   }
 310 }
 311 
 312 // Print all mutexes/monitors that are currently owned by a thread; called
 313 // by fatal error handler.
 314 void print_owned_locks_on_error(outputStream* st) {
 315   st->print("VM Mutex/Monitor currently owned by a thread: ");
 316   bool none = true;
 317   for (int i = 0; i < _num_mutex; i++) {
 318      // see if it has an owner
 319      if (_mutex_array[i]->owner() != NULL) {


 120 Mutex*   OldSets_lock                 = NULL;
 121 Monitor* RootRegionScan_lock          = NULL;
 122 
 123 Monitor* GCTaskManager_lock           = NULL;
 124 
 125 Mutex*   Management_lock              = NULL;
 126 Monitor* Service_lock                 = NULL;
 127 Monitor* PeriodicTask_lock            = NULL;
 128 Monitor* RedefineClasses_lock         = NULL;
 129 
 130 #if INCLUDE_TRACE
 131 Mutex*   JfrStacktrace_lock           = NULL;
 132 Monitor* JfrMsg_lock                  = NULL;
 133 Mutex*   JfrBuffer_lock               = NULL;
 134 Mutex*   JfrStream_lock               = NULL;
 135 #endif
 136 
 137 #ifndef SUPPORTS_NATIVE_CX8
 138 Mutex*   UnsafeJlong_lock             = NULL;
 139 #endif
 140 Monitor* CodeHeapStateAnalytics_lock  = NULL;
 141 
 142 #define MAX_NUM_MUTEX 128
 143 static Monitor * _mutex_array[MAX_NUM_MUTEX];
 144 static int _num_mutex;
 145 
 146 #ifdef ASSERT
// Debug-only check: the caller must hold 'lock', or the VM must be in a state
// where holding it is unnecessary (at a safepoint, during bootstrap, or when
// the lock is held by the requester of the current VM operation); otherwise
// aborts via fatal().
 147 void assert_locked_or_safepoint(const Monitor * lock) {
 148   // check if this thread owns the lock (common case)
 149   if (IgnoreLockingAssertions) return;
 150   assert(lock != NULL, "Need non-NULL lock");
 151   if (lock->owned_by_self()) return;
 152   if (SafepointSynchronize::is_at_safepoint()) return;
 153   // during early bootstrap no mutator threads can race with us
 154   if (!Universe::is_fully_initialized()) return;
 155   // see if invoker of VM operation owns it
 156   VM_Operation* op = VMThread::vm_operation();
 157   if (op != NULL && op->calling_thread() == lock->owner()) return;
 158   fatal("must own lock %s", lock->name());
 159 }
 159 
 160 // a stronger assertion than the above


 281   def(MethodCompileQueue_lock      , PaddedMonitor, nonleaf+4,   true,  Monitor::_safepoint_check_always);
 282   def(Debug2_lock                  , PaddedMutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
 283   def(Debug3_lock                  , PaddedMutex  , nonleaf+4,   true,  Monitor::_safepoint_check_never);
 284   def(CompileThread_lock           , PaddedMonitor, nonleaf+5,   false, Monitor::_safepoint_check_always);
 285   def(PeriodicTask_lock            , PaddedMonitor, nonleaf+5,   true,  Monitor::_safepoint_check_sometimes);
 286   def(RedefineClasses_lock         , PaddedMonitor, nonleaf+5,   true,  Monitor::_safepoint_check_always);
 287   if (WhiteBoxAPI) {
 288     def(Compilation_lock           , PaddedMonitor, leaf,        false, Monitor::_safepoint_check_never);
 289   }
 290 
 291 #if INCLUDE_TRACE
 292   def(JfrMsg_lock                  , PaddedMonitor, leaf,        true,  Monitor::_safepoint_check_always);
 293   def(JfrBuffer_lock               , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 294   def(JfrStream_lock               , PaddedMutex  , leaf+1,      true,  Monitor::_safepoint_check_never);      // ensure to rank lower than 'safepoint'
 295   def(JfrStacktrace_lock           , PaddedMutex  , special,     true,  Monitor::_safepoint_check_sometimes);
 296 #endif
 297 
 298 #ifndef SUPPORTS_NATIVE_CX8
 299   def(UnsafeJlong_lock             , PaddedMutex  , special,     false, Monitor::_safepoint_check_never);
 300 #endif
 301 
 302   def(CodeHeapStateAnalytics_lock  , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 303 }
 304 
 305 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
 306   if (SafepointSynchronize::is_at_safepoint()) {
 307     _locked = false;
 308   } else {
 309     _mutex = mutex;
 310     _locked = true;
 311     _mutex->lock();
 312   }
 313 }
 314 
 315 // Print all mutexes/monitors that are currently owned by a thread; called
 316 // by fatal error handler.
 317 void print_owned_locks_on_error(outputStream* st) {
 318   st->print("VM Mutex/Monitor currently owned by a thread: ");
 319   bool none = true;
 320   for (int i = 0; i < _num_mutex; i++) {
 321      // see if it has an owner
 322      if (_mutex_array[i]->owner() != NULL) {
< prev index next >