// Definitions of the globally-shared VM lock/monitor pointers.  Each one is
// NULL until mutex_init() (below) allocates the lock via the def() macro and
// registers it in _mutex_array.  Conditional groups exist only when the
// matching feature (JFR, NMT, CDS, JVMTI, JVMCI) is compiled in.
// NOTE(review): the leading number on every line looks like line-number
// residue from the tool that extracted this chunk, not real source text.
113 Mutex* FreeList_lock = NULL;
114 Mutex* OldSets_lock = NULL;
115 Monitor* RootRegionScan_lock = NULL;
116
117 Mutex* Management_lock = NULL;
118 Monitor* Service_lock = NULL;
119 Monitor* PeriodicTask_lock = NULL;
120 Monitor* RedefineClasses_lock = NULL;
121
122 #if INCLUDE_JFR
123 Mutex* JfrStacktrace_lock = NULL;
124 Monitor* JfrMsg_lock = NULL;
125 Mutex* JfrBuffer_lock = NULL;
126 Mutex* JfrStream_lock = NULL;
127 Monitor* JfrThreadSampler_lock = NULL;
128 #endif
129
130 #ifndef SUPPORTS_NATIVE_CX8
// Only needed on platforms without native 8-byte CAS; presumably serializes
// 64-bit Unsafe accesses there -- confirm against the Unsafe intrinsics.
131 Mutex* UnsafeJlong_lock = NULL;
132 #endif
133 Monitor* CodeHeapStateAnalytics_lock = NULL;
134
135 Mutex* MetaspaceExpand_lock = NULL;
136 Mutex* ClassLoaderDataGraph_lock = NULL;
137 Monitor* ThreadsSMRDelete_lock = NULL;
138 Mutex* SharedDecoder_lock = NULL;
139 Mutex* DCmdFactory_lock = NULL;
140 #if INCLUDE_NMT
141 Mutex* NMTQuery_lock = NULL;
142 #endif
143 #if INCLUDE_CDS
144 #if INCLUDE_JVMTI
145 Mutex* CDSClassFileStream_lock = NULL;
146 #endif
147 Mutex* DumpTimeTable_lock = NULL;
148 #endif // INCLUDE_CDS
149
150 #if INCLUDE_JVMCI
151 Monitor* JVMCI_lock = NULL;
152 #endif
153
154
// Registry of every lock created through def(); walked by the fatal error
// handler (print_owned_locks_on_error) to report which locks are held.
155 #define MAX_NUM_MUTEX 128
156 static Monitor * _mutex_array[MAX_NUM_MUTEX];
157 static int _num_mutex;
158
#ifdef ASSERT

// Verify that the calling thread owns 'lock', allowing three exemptions:
// we are at a safepoint, the VM is not yet fully initialized, or the thread
// that requested the in-flight VM operation is the owner.
void assert_locked_or_safepoint(const Monitor * lock) {
  if (IgnoreLockingAssertions) return;
  assert(lock != NULL, "Need non-NULL lock");
  bool exempt = lock->owned_by_self()                    // common case: we hold it
             || SafepointSynchronize::is_at_safepoint()
             || !Universe::is_fully_initialized();
  if (exempt) return;
  // The VM thread may act on behalf of the VM operation's requestor,
  // which may legitimately be the lock owner.
  VM_Operation* op = VMThread::vm_operation();
  bool requestor_owns = (op != NULL) && (op->calling_thread() == lock->owner());
  if (!requestor_owns) {
    fatal("must own lock %s", lock->name());
  }
}

// A weaker assertion than the above: it suffices that *some* thread holds
// the lock (or that we are at a safepoint / still bootstrapping).
void assert_locked_or_safepoint_weak(const Monitor * lock) {
  if (IgnoreLockingAssertions) return;
  assert(lock != NULL, "Need non-NULL lock");
  bool exempt = lock->is_locked()
             || SafepointSynchronize::is_at_safepoint()
             || !Universe::is_fully_initialized();
  if (!exempt) {
    fatal("must own lock %s", lock->name());
  }
}

// A stronger assertion than the above: the calling thread itself must hold
// the lock, with no safepoint or initialization exemption.
void assert_lock_strong(const Monitor * lock) {
  if (IgnoreLockingAssertions) return;
  assert(lock != NULL, "Need non-NULL lock");
  if (!lock->owned_by_self()) {
    fatal("must own lock %s", lock->name());
  }
}
#endif
191
// def(var, type, pri, vm_block, safepoint_check_allowed):
// allocates a lock of 'type' with rank Mutex::pri, names it after the
// variable itself (#var), stores it in the global pointer 'var', and
// registers it in _mutex_array so the error reporter can enumerate it.
192 #define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
193 var = new type(Mutex::pri, #var, vm_block, safepoint_check_allowed); \
194 assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
195 _mutex_array[_num_mutex++] = var; \
196 }
197
198 // Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
// Creates every global lock declared above, in rank order groups, via def().
// NOTE(review): this chunk appears to have lines elided by extraction (the
// embedded numbering jumps 204->208 and 248->317), so braces do not balance
// as shown -- verify against the original file before editing.
199 void mutex_init() {
200 def(tty_lock , PaddedMutex , tty, true, Monitor::_safepoint_check_never); // allow to lock in VM
201
202 def(CGC_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // coordinate between fore- and background GC
203 def(STS_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
204
208 def(Shared_DirtyCardQ_lock , PaddedMutex , access + 1, true, Monitor::_safepoint_check_never);
209
210 def(FreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
211 def(OldSets_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
212 def(RootRegionScan_lock , PaddedMonitor, leaf , true, Monitor::_safepoint_check_never);
213
214 def(StringDedupQueue_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
215 def(StringDedupTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
216
217 def(MarkStackFreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
218 def(MarkStackChunkList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
219
220 def(MonitoringSupport_lock , PaddedMutex , native , true, Monitor::_safepoint_check_never); // used for serviceability monitoring support
// NOTE(review): the '}' below has no visible matching '{' in this chunk;
// presumably a GC-specific 'if (...) {' opener was among the elided lines.
221 }
222 if (UseShenandoahGC) {
223 def(StringDedupQueue_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
224 def(StringDedupTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
225 }
226 def(ParGCRareEvent_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_always);
227 def(CGCPhaseManager_lock , PaddedMonitor, leaf, false, Monitor::_safepoint_check_always);
228 def(CodeCache_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
229 def(RawMonitor_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
230 def(OopMapCacheAlloc_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for oop_map_cache allocation.
231
232 def(MetaspaceExpand_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never);
233 def(ClassLoaderDataGraph_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always);
234
235 def(Patching_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never); // used for safepointing and code patching.
236 def(Service_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // used for service thread operations
237 def(JmethodIdCreation_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for creating jmethodIDs.
238
239 def(SystemDictionary_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_always);
240 def(ProtectionDomainSet_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never);
241 def(SharedDictionary_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always);
242 def(Module_lock , PaddedMutex , leaf+2, true, Monitor::_safepoint_check_always);
243 def(InlineCacheBuffer_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
244 def(VMStatistic_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always);
245 def(ExpandHeap_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // Used during compilation by VM thread
246 def(JNIHandleBlockFreeList_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never); // handles are used by VM thread
247 def(SignatureHandlerLibrary_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always);
248 def(SymbolArena_lock , PaddedMutex , leaf+2, true, Monitor::_safepoint_check_never);
317 def(CodeHeapStateAnalytics_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
318 def(NMethodSweeperStats_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
319 def(ThreadsSMRDelete_lock , PaddedMonitor, special, false, Monitor::_safepoint_check_never);
320 def(SharedDecoder_lock , PaddedMutex , native, false, Monitor::_safepoint_check_never);
321 def(DCmdFactory_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
322 #if INCLUDE_NMT
323 def(NMTQuery_lock , PaddedMutex , max_nonleaf, false, Monitor::_safepoint_check_always);
324 #endif
325 #if INCLUDE_CDS
326 #if INCLUDE_JVMTI
327 def(CDSClassFileStream_lock , PaddedMutex , max_nonleaf, false, Monitor::_safepoint_check_always);
328 #endif
329
// NOTE(review): as shown, the JVMCI_lock def() sits inside the INCLUDE_CDS
// region, while its declaration above is guarded only by INCLUDE_JVMCI --
// a JVMCI-without-CDS build would leave JVMCI_lock NULL.  Confirm whether
// the enclosing #endif placement was garbled by the elision.
330 #if INCLUDE_JVMCI
331 def(JVMCI_lock , PaddedMonitor, nonleaf+2, true, Monitor::_safepoint_check_always);
332 #endif
333 def(DumpTimeTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
334 #endif // INCLUDE_CDS
335 }
336
337 GCMutexLocker::GCMutexLocker(Monitor * mutex) {
338 if (SafepointSynchronize::is_at_safepoint()) {
339 _locked = false;
340 } else {
341 _mutex = mutex;
342 _locked = true;
343 _mutex->lock();
344 }
345 }
346
347 // Print all mutexes/monitors that are currently owned by a thread; called
348 // by fatal error handler.
349 void print_owned_locks_on_error(outputStream* st) {
350 st->print("VM Mutex/Monitor currently owned by a thread: ");
351 bool none = true;
352 for (int i = 0; i < _num_mutex; i++) {
353 // see if it has an owner
354 if (_mutex_array[i]->owner() != NULL) {
355 if (none) {
356 // print format used by Mutex::print_on_error()
357 st->print_cr(" ([mutex/lock_event])");
|
// Definitions of the globally-shared VM lock/monitor pointers.  Each one is
// NULL until mutex_init() (below) allocates the lock via the def() macro and
// registers it in _mutex_array.  Conditional groups exist only when the
// matching feature (JFR, NMT, CDS, JVMTI, JVMCI) is compiled in.
// NOTE(review): the leading number on every line looks like line-number
// residue from the tool that extracted this chunk, not real source text.
113 Mutex* FreeList_lock = NULL;
114 Mutex* OldSets_lock = NULL;
115 Monitor* RootRegionScan_lock = NULL;
116
117 Mutex* Management_lock = NULL;
118 Monitor* Service_lock = NULL;
119 Monitor* PeriodicTask_lock = NULL;
120 Monitor* RedefineClasses_lock = NULL;
121
122 #if INCLUDE_JFR
123 Mutex* JfrStacktrace_lock = NULL;
124 Monitor* JfrMsg_lock = NULL;
125 Mutex* JfrBuffer_lock = NULL;
126 Mutex* JfrStream_lock = NULL;
127 Monitor* JfrThreadSampler_lock = NULL;
128 #endif
129
130 #ifndef SUPPORTS_NATIVE_CX8
// Only needed on platforms without native 8-byte CAS; presumably serializes
// 64-bit Unsafe accesses there -- confirm against the Unsafe intrinsics.
131 Mutex* UnsafeJlong_lock = NULL;
132 #endif
133 Mutex* CodeHeapStateAnalytics_lock = NULL;
134
135 Mutex* MetaspaceExpand_lock = NULL;
136 Mutex* ClassLoaderDataGraph_lock = NULL;
137 Monitor* ThreadsSMRDelete_lock = NULL;
138 Mutex* SharedDecoder_lock = NULL;
139 Mutex* DCmdFactory_lock = NULL;
140 #if INCLUDE_NMT
141 Mutex* NMTQuery_lock = NULL;
142 #endif
143 #if INCLUDE_CDS
144 #if INCLUDE_JVMTI
145 Mutex* CDSClassFileStream_lock = NULL;
146 #endif
147 Mutex* DumpTimeTable_lock = NULL;
148 #endif // INCLUDE_CDS
149
150 #if INCLUDE_JVMCI
151 Monitor* JVMCI_lock = NULL;
152 #endif
153
154
// Registry of every lock created through def(); walked by the fatal error
// handler (print_owned_locks_on_error) to report which locks are held.
155 #define MAX_NUM_MUTEX 128
156 static Mutex* _mutex_array[MAX_NUM_MUTEX];
157 static int _num_mutex;
158
#ifdef ASSERT

// Verify that the calling thread owns 'lock', allowing three exemptions:
// we are at a safepoint, the VM is not yet fully initialized, or the thread
// that requested the in-flight VM operation is the owner.
void assert_locked_or_safepoint(const Mutex* lock) {
  if (IgnoreLockingAssertions) return;
  assert(lock != NULL, "Need non-NULL lock");
  bool exempt = lock->owned_by_self()                    // common case: we hold it
             || SafepointSynchronize::is_at_safepoint()
             || !Universe::is_fully_initialized();
  if (exempt) return;
  // The VM thread may act on behalf of the VM operation's requestor,
  // which may legitimately be the lock owner.
  VM_Operation* op = VMThread::vm_operation();
  bool requestor_owns = (op != NULL) && (op->calling_thread() == lock->owner());
  if (!requestor_owns) {
    fatal("must own lock %s", lock->name());
  }
}

// A weaker assertion than the above: it suffices that *some* thread holds
// the lock (or that we are at a safepoint / still bootstrapping).
void assert_locked_or_safepoint_weak(const Mutex* lock) {
  if (IgnoreLockingAssertions) return;
  assert(lock != NULL, "Need non-NULL lock");
  bool exempt = lock->is_locked()
             || SafepointSynchronize::is_at_safepoint()
             || !Universe::is_fully_initialized();
  if (!exempt) {
    fatal("must own lock %s", lock->name());
  }
}

// A stronger assertion than the above: the calling thread itself must hold
// the lock, with no safepoint or initialization exemption.
void assert_lock_strong(const Mutex* lock) {
  if (IgnoreLockingAssertions) return;
  assert(lock != NULL, "Need non-NULL lock");
  if (!lock->owned_by_self()) {
    fatal("must own lock %s", lock->name());
  }
}
#endif
191
// def(var, type, pri, vm_block, safepoint_check_allowed):
// allocates a lock of 'type' with rank Mutex::pri, names it after the
// variable itself (#var), stores it in the global pointer 'var', and
// registers it in _mutex_array so the error reporter can enumerate it.
192 #define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
193 var = new type(Mutex::pri, #var, vm_block, safepoint_check_allowed); \
194 assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
195 _mutex_array[_num_mutex++] = var; \
196 }
197
198 // Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
// Creates every global lock declared above, in rank order groups, via def().
// NOTE(review): this chunk appears to have lines elided by extraction (the
// embedded numbering jumps 204->208 and 248->317), so braces do not balance
// as shown -- verify against the original file before editing.
199 void mutex_init() {
200 def(tty_lock , PaddedMutex , tty, true, Monitor::_safepoint_check_never); // allow to lock in VM
201
202 def(CGC_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // coordinate between fore- and background GC
203 def(STS_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
204
208 def(Shared_DirtyCardQ_lock , PaddedMutex , access + 1, true, Monitor::_safepoint_check_never);
209
210 def(FreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
211 def(OldSets_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
212 def(RootRegionScan_lock , PaddedMonitor, leaf , true, Monitor::_safepoint_check_never);
213
214 def(StringDedupQueue_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
215 def(StringDedupTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
216
217 def(MarkStackFreeList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
218 def(MarkStackChunkList_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_never);
219
220 def(MonitoringSupport_lock , PaddedMutex , native , true, Monitor::_safepoint_check_never); // used for serviceability monitoring support
// NOTE(review): the '}' below has no visible matching '{' in this chunk;
// presumably a GC-specific 'if (...) {' opener was among the elided lines.
221 }
222 if (UseShenandoahGC) {
223 def(StringDedupQueue_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never);
224 def(StringDedupTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
225 }
226 def(ParGCRareEvent_lock , PaddedMutex , leaf , true, Monitor::_safepoint_check_always);
227 def(CGCPhaseManager_lock , PaddedMonitor, leaf, false, Monitor::_safepoint_check_always);
228 def(CodeCache_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never);
229 def(RawMonitor_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
230 def(OopMapCacheAlloc_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for oop_map_cache allocation.
231
232 def(MetaspaceExpand_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never);
233 def(ClassLoaderDataGraph_lock , PaddedMutex , nonleaf, true, Monitor::_safepoint_check_always);
234
235 def(Patching_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never); // used for safepointing and code patching.
236 def(Service_lock , PaddedMonitor, special, true, Monitor::_safepoint_check_never); // used for service thread operations
237 def(JmethodIdCreation_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // used for creating jmethodIDs.
238
239 def(SystemDictionary_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_always);
240 def(ProtectionDomainSet_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never);
241 def(SharedDictionary_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always);
242 def(Module_lock , PaddedMutex , leaf+2, true, Monitor::_safepoint_check_always);
243 def(InlineCacheBuffer_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
244 def(VMStatistic_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always);
245 def(ExpandHeap_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_always); // Used during compilation by VM thread
246 def(JNIHandleBlockFreeList_lock , PaddedMutex , leaf-1, true, Monitor::_safepoint_check_never); // handles are used by VM thread
247 def(SignatureHandlerLibrary_lock , PaddedMutex , leaf, false, Monitor::_safepoint_check_always);
248 def(SymbolArena_lock , PaddedMutex , leaf+2, true, Monitor::_safepoint_check_never);
317 def(CodeHeapStateAnalytics_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
318 def(NMethodSweeperStats_lock , PaddedMutex , special, true, Monitor::_safepoint_check_never);
319 def(ThreadsSMRDelete_lock , PaddedMonitor, special, false, Monitor::_safepoint_check_never);
320 def(SharedDecoder_lock , PaddedMutex , native, false, Monitor::_safepoint_check_never);
321 def(DCmdFactory_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
322 #if INCLUDE_NMT
323 def(NMTQuery_lock , PaddedMutex , max_nonleaf, false, Monitor::_safepoint_check_always);
324 #endif
325 #if INCLUDE_CDS
326 #if INCLUDE_JVMTI
327 def(CDSClassFileStream_lock , PaddedMutex , max_nonleaf, false, Monitor::_safepoint_check_always);
328 #endif
329
// NOTE(review): as shown, the JVMCI_lock def() sits inside the INCLUDE_CDS
// region, while its declaration above is guarded only by INCLUDE_JVMCI --
// a JVMCI-without-CDS build would leave JVMCI_lock NULL.  Confirm whether
// the enclosing #endif placement was garbled by the elision.
330 #if INCLUDE_JVMCI
331 def(JVMCI_lock , PaddedMonitor, nonleaf+2, true, Monitor::_safepoint_check_always);
332 #endif
333 def(DumpTimeTable_lock , PaddedMutex , leaf, true, Monitor::_safepoint_check_never);
334 #endif // INCLUDE_CDS
335 }
336
337 GCMutexLocker::GCMutexLocker(Mutex* mutex) {
338 if (SafepointSynchronize::is_at_safepoint()) {
339 _locked = false;
340 } else {
341 _mutex = mutex;
342 _locked = true;
343 _mutex->lock();
344 }
345 }
346
347 // Print all mutexes/monitors that are currently owned by a thread; called
348 // by fatal error handler.
349 void print_owned_locks_on_error(outputStream* st) {
350 st->print("VM Mutex/Monitor currently owned by a thread: ");
351 bool none = true;
352 for (int i = 0; i < _num_mutex; i++) {
353 // see if it has an owner
354 if (_mutex_array[i]->owner() != NULL) {
355 if (none) {
356 // print format used by Mutex::print_on_error()
357 st->print_cr(" ([mutex/lock_event])");
|