132
// Definitions of the global mutex/monitor pointers declared in
// mutexLocker.hpp.  All start out NULL and are created in mutex_init()
// via the def() entries later in this file.  Conditionally-compiled
// locks are guarded by the same feature macros at both their
// definition and their def() site.
133 #ifndef SUPPORTS_NATIVE_CX8
// Serializes 64-bit Unsafe accesses on platforms without native
// 8-byte compare-and-exchange.
134 Mutex* UnsafeJlong_lock = NULL;
135 #endif
136 Mutex* CodeHeapStateAnalytics_lock = NULL;
137
138 Mutex* MetaspaceExpand_lock = NULL;
139 Mutex* ClassLoaderDataGraph_lock = NULL;
140 Monitor* ThreadsSMRDelete_lock = NULL;
141 Mutex* ThreadIdTableCreate_lock = NULL;
142 Mutex* SharedDecoder_lock = NULL;
143 Mutex* DCmdFactory_lock = NULL;
144 #if INCLUDE_NMT
145 Mutex* NMTQuery_lock = NULL;
146 #endif
147 #if INCLUDE_CDS
148 #if INCLUDE_JVMTI
149 Mutex* CDSClassFileStream_lock = NULL;
150 #endif
151 Mutex* DumpTimeTable_lock = NULL;
152 #endif // INCLUDE_CDS
153
154 #if INCLUDE_JVMCI
155 Monitor* JVMCI_lock = NULL;
156 #endif
157
158
// Registry of all created mutexes, filled in by def() during
// mutex_init(); used e.g. by the fatal-error lock printer below.
159 #define MAX_NUM_MUTEX 128
160 static Mutex* _mutex_array[MAX_NUM_MUTEX];
161 static int _num_mutex;
162
163 #ifdef ASSERT
164 void assert_locked_or_safepoint(const Mutex* lock) {
165 // check if this thread owns the lock (common case)
166 assert(lock != NULL, "Need non-NULL lock");
167 if (lock->owned_by_self()) return;
168 if (SafepointSynchronize::is_at_safepoint()) return;
169 if (!Universe::is_fully_initialized()) return;
170 // see if invoker of VM operation owns it
171 VM_Operation* op = VMThread::vm_operation();
325 #endif
326
// Tail of mutex_init(): remaining def(name, padded-type, rank,
// allow_vm_block, safepoint_check_mode) entries.  NOTE(review): the
// function's opening and the def() macro are above this chunk and not
// visible here; ranks (special/leaf/native/nonleaf/...) presumably come
// from Mutex's lock-ranking enum — confirm against mutex.hpp.
327 #ifndef SUPPORTS_NATIVE_CX8
328 def(UnsafeJlong_lock , PaddedMutex , special, false, _safepoint_check_never);
329 #endif
330
331 def(CodeHeapStateAnalytics_lock , PaddedMutex , leaf, true, _safepoint_check_never);
332 def(NMethodSweeperStats_lock , PaddedMutex , special, true, _safepoint_check_never);
333 def(ThreadsSMRDelete_lock , PaddedMonitor, special, true, _safepoint_check_never);
334 def(ThreadIdTableCreate_lock , PaddedMutex , leaf, false, _safepoint_check_always);
335 def(SharedDecoder_lock , PaddedMutex , native, true, _safepoint_check_never);
336 def(DCmdFactory_lock , PaddedMutex , leaf, true, _safepoint_check_never);
337 #if INCLUDE_NMT
338 def(NMTQuery_lock , PaddedMutex , max_nonleaf, false, _safepoint_check_always);
339 #endif
340 #if INCLUDE_CDS
341 #if INCLUDE_JVMTI
342 def(CDSClassFileStream_lock , PaddedMutex , max_nonleaf, false, _safepoint_check_always);
343 #endif
// Ranked leaf-1 so it can be taken while holding other leaf-rank locks.
344 def(DumpTimeTable_lock , PaddedMutex , leaf - 1, true, _safepoint_check_never);
345 #endif // INCLUDE_CDS
346
347 #if INCLUDE_JVMCI
348 def(JVMCI_lock , PaddedMonitor, nonleaf+2, true, _safepoint_check_always);
349 #endif
350 }
351
352 GCMutexLocker::GCMutexLocker(Mutex* mutex) {
353 if (SafepointSynchronize::is_at_safepoint()) {
354 _locked = false;
355 } else {
356 _mutex = mutex;
357 _locked = true;
358 _mutex->lock();
359 }
360 }
361
362 // Print all mutexes/monitors that are currently owned by a thread; called
363 // by fatal error handler.
364 void print_owned_locks_on_error(outputStream* st) {
|
132
// Definitions of the global mutex/monitor pointers declared in
// mutexLocker.hpp.  All start out NULL and are created in mutex_init()
// via the def() entries later in this file.  Conditionally-compiled
// locks are guarded by the same feature macros at both their
// definition and their def() site.
133 #ifndef SUPPORTS_NATIVE_CX8
// Serializes 64-bit Unsafe accesses on platforms without native
// 8-byte compare-and-exchange.
134 Mutex* UnsafeJlong_lock = NULL;
135 #endif
136 Mutex* CodeHeapStateAnalytics_lock = NULL;
137
138 Mutex* MetaspaceExpand_lock = NULL;
139 Mutex* ClassLoaderDataGraph_lock = NULL;
140 Monitor* ThreadsSMRDelete_lock = NULL;
141 Mutex* ThreadIdTableCreate_lock = NULL;
142 Mutex* SharedDecoder_lock = NULL;
143 Mutex* DCmdFactory_lock = NULL;
144 #if INCLUDE_NMT
145 Mutex* NMTQuery_lock = NULL;
146 #endif
147 #if INCLUDE_CDS
148 #if INCLUDE_JVMTI
149 Mutex* CDSClassFileStream_lock = NULL;
150 #endif
151 Mutex* DumpTimeTable_lock = NULL;
152 Mutex* CDSLambda_lock = NULL;
153 #endif // INCLUDE_CDS
154
155 #if INCLUDE_JVMCI
156 Monitor* JVMCI_lock = NULL;
157 #endif
158
159
// Registry of all created mutexes, filled in by def() during
// mutex_init(); used e.g. by the fatal-error lock printer below.
160 #define MAX_NUM_MUTEX 128
161 static Mutex* _mutex_array[MAX_NUM_MUTEX];
162 static int _num_mutex;
163
164 #ifdef ASSERT
165 void assert_locked_or_safepoint(const Mutex* lock) {
166 // check if this thread owns the lock (common case)
167 assert(lock != NULL, "Need non-NULL lock");
168 if (lock->owned_by_self()) return;
169 if (SafepointSynchronize::is_at_safepoint()) return;
170 if (!Universe::is_fully_initialized()) return;
171 // see if invoker of VM operation owns it
172 VM_Operation* op = VMThread::vm_operation();
326 #endif
327
// Tail of mutex_init(): remaining def(name, padded-type, rank,
// allow_vm_block, safepoint_check_mode) entries.  NOTE(review): the
// function's opening and the def() macro are above this chunk and not
// visible here; ranks (special/leaf/native/nonleaf/...) presumably come
// from Mutex's lock-ranking enum — confirm against mutex.hpp.
328 #ifndef SUPPORTS_NATIVE_CX8
329 def(UnsafeJlong_lock , PaddedMutex , special, false, _safepoint_check_never);
330 #endif
331
332 def(CodeHeapStateAnalytics_lock , PaddedMutex , leaf, true, _safepoint_check_never);
333 def(NMethodSweeperStats_lock , PaddedMutex , special, true, _safepoint_check_never);
334 def(ThreadsSMRDelete_lock , PaddedMonitor, special, true, _safepoint_check_never);
335 def(ThreadIdTableCreate_lock , PaddedMutex , leaf, false, _safepoint_check_always);
336 def(SharedDecoder_lock , PaddedMutex , native, true, _safepoint_check_never);
337 def(DCmdFactory_lock , PaddedMutex , leaf, true, _safepoint_check_never);
338 #if INCLUDE_NMT
339 def(NMTQuery_lock , PaddedMutex , max_nonleaf, false, _safepoint_check_always);
340 #endif
341 #if INCLUDE_CDS
342 #if INCLUDE_JVMTI
343 def(CDSClassFileStream_lock , PaddedMutex , max_nonleaf, false, _safepoint_check_always);
344 #endif
// Ranked leaf-1 so it can be taken while holding other leaf-rank locks.
345 def(DumpTimeTable_lock , PaddedMutex , leaf - 1, true, _safepoint_check_never);
346 def(CDSLambda_lock , PaddedMutex , leaf, true, _safepoint_check_never);
347 #endif // INCLUDE_CDS
348
349 #if INCLUDE_JVMCI
350 def(JVMCI_lock , PaddedMonitor, nonleaf+2, true, _safepoint_check_always);
351 #endif
352 }
353
354 GCMutexLocker::GCMutexLocker(Mutex* mutex) {
355 if (SafepointSynchronize::is_at_safepoint()) {
356 _locked = false;
357 } else {
358 _mutex = mutex;
359 _locked = true;
360 _mutex->lock();
361 }
362 }
363
364 // Print all mutexes/monitors that are currently owned by a thread; called
365 // by fatal error handler.
366 void print_owned_locks_on_error(outputStream* st) {
|