src/hotspot/share/runtime/mutexLocker.cpp

@@ -128,11 +128,11 @@
 #endif
 
 #ifndef SUPPORTS_NATIVE_CX8
 Mutex*   UnsafeJlong_lock             = NULL;
 #endif
-Monitor* CodeHeapStateAnalytics_lock  = NULL;
+Mutex*   CodeHeapStateAnalytics_lock  = NULL;
 
 Mutex*   MetaspaceExpand_lock         = NULL;
 Mutex*   ClassLoaderDataGraph_lock    = NULL;
 Monitor* ThreadsSMRDelete_lock        = NULL;
 Mutex*   SharedDecoder_lock           = NULL;

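Context for the retyping above: this patch assumes the restructured HotSpot lock hierarchy in which Mutex is the base class and Monitor derives from it, adding wait/notify. Locks that are never waited on, such as CodeHeapStateAnalytics_lock here, can then be declared with the narrower Mutex type. A minimal sketch of that assumed hierarchy (illustrative, not the actual mutex.hpp declarations):

    class Mutex {                      // base lock: plain mutual exclusion
     public:
      void lock();
      void unlock();
      bool owned_by_self() const;
    };

    class Monitor : public Mutex {     // adds condition-variable behavior
     public:
      bool wait(long timeout_ms = 0);  // only Monitors support waiting
      void notify_all();
    };
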
@@ -151,15 +151,15 @@
 Monitor* JVMCI_lock                   = NULL;
 #endif
 
 
 #define MAX_NUM_MUTEX 128
-static Monitor * _mutex_array[MAX_NUM_MUTEX];
+static Mutex* _mutex_array[MAX_NUM_MUTEX];
 static int _num_mutex;
 
 #ifdef ASSERT
-void assert_locked_or_safepoint(const Monitor * lock) {
+void assert_locked_or_safepoint(const Mutex* lock) {
   // check if this thread owns the lock (common case)
   if (IgnoreLockingAssertions) return;
   assert(lock != NULL, "Need non-NULL lock");
   if (lock->owned_by_self()) return;
   if (SafepointSynchronize::is_at_safepoint()) return;

@@ -169,21 +169,21 @@
   if (op != NULL && op->calling_thread() == lock->owner()) return;
   fatal("must own lock %s", lock->name());
 }
 
 // a weaker assertion than the above
-void assert_locked_or_safepoint_weak(const Monitor * lock) {
+void assert_locked_or_safepoint_weak(const Mutex* lock) {
   if (IgnoreLockingAssertions) return;
   assert(lock != NULL, "Need non-NULL lock");
   if (lock->is_locked()) return;
   if (SafepointSynchronize::is_at_safepoint()) return;
   if (!Universe::is_fully_initialized()) return;
   fatal("must own lock %s", lock->name());
 }
 
 // a stronger assertion than the above
-void assert_lock_strong(const Monitor * lock) {
+void assert_lock_strong(const Mutex* lock) {
   if (IgnoreLockingAssertions) return;
   assert(lock != NULL, "Need non-NULL lock");
   if (lock->owned_by_self()) return;
   fatal("must own lock %s", lock->name());
 }

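With that hierarchy, the three assertion helpers now take const Mutex*, the base type, so a call site can pass either a Mutex or a Monitor without casting. A hypothetical call site (the function name is illustrative, not from this patch):

    // Code that mutates state guarded by CodeCache_lock can assert that
    // the caller holds the lock, or that the VM is at a safepoint where
    // no competing mutator thread can run.
    static void update_code_heap_state() {
      assert_locked_or_safepoint(CodeCache_lock);
      // ... mutate the guarded state ...
    }
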
@@ -223,11 +223,11 @@
     def(StringDedupQueue_lock      , PaddedMonitor, leaf,        true,  Monitor::_safepoint_check_never);
     def(StringDedupTable_lock      , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
   }
   def(ParGCRareEvent_lock          , PaddedMutex  , leaf     ,   true,  Monitor::_safepoint_check_always);
   def(CGCPhaseManager_lock         , PaddedMonitor, leaf,        false, Monitor::_safepoint_check_always);
-  def(CodeCache_lock               , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
+  def(CodeCache_lock               , PaddedMonitor, special,     true,  Monitor::_safepoint_check_never);
   def(RawMonitor_lock              , PaddedMutex  , special,     true,  Monitor::_safepoint_check_never);
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_always); // used for oop_map_cache allocation.
 
   def(MetaspaceExpand_lock         , PaddedMutex  , leaf-1,      true,  Monitor::_safepoint_check_never);
   def(ClassLoaderDataGraph_lock    , PaddedMutex  , nonleaf,     true,  Monitor::_safepoint_check_always);

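Each def(...) line constructs a lock of the given padded type with a rank and a safepoint-check policy, and records it in _mutex_array; since a Monitor* converts implicitly to Mutex*, the retyped array accepts both kinds. A plausible sketch of the registration the macro performs (the real macro is defined earlier in this file; its exact shape is an assumption here):

    #define def(var, type, pri, vm_block, safepoint_check) {           \
      var = new type(Mutex::pri, #var, vm_block, safepoint_check);     \
      assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX");    \
      _mutex_array[_num_mutex++] = var;                                \
    }

Note that CodeCache_lock switches from PaddedMutex to PaddedMonitor in this hunk, presumably because some code needs to wait on it, while code that only locks it can keep referring to it through the Mutex interface.
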
@@ -332,11 +332,11 @@
 #endif
   def(DumpTimeTable_lock           , PaddedMutex  , leaf,        true,  Monitor::_safepoint_check_never);
 #endif // INCLUDE_CDS
 }
 
-GCMutexLocker::GCMutexLocker(Monitor * mutex) {
+GCMutexLocker::GCMutexLocker(Mutex* mutex) {
   if (SafepointSynchronize::is_at_safepoint()) {
     _locked = false;
   } else {
     _mutex = mutex;
     _locked = true;
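The hunk above retypes GCMutexLocker's constructor argument the same way. Its semantics, visible from the shown body, are to skip locking entirely at a safepoint (mutator threads are already stopped) and otherwise take the lock for the scope. A hypothetical use (the function is illustrative, not from this patch):

    static void gc_rare_event() {
      GCMutexLocker ml(ParGCRareEvent_lock);  // no-op if at a safepoint
      // ... work that must not race with mutator threads ...
    }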