171 fatal("must own lock %s", lock->name());
172 }
173
174 // a weaker assertion than the above
175 void assert_locked_or_safepoint_weak(const Mutex* lock) {
176 assert(lock != NULL, "Need non-NULL lock");
177 if (lock->is_locked()) return;
178 if (SafepointSynchronize::is_at_safepoint()) return;
179 if (!Universe::is_fully_initialized()) return;
180 fatal("must own lock %s", lock->name());
181 }
182
183 // a stronger assertion than the above
184 void assert_lock_strong(const Mutex* lock) {
185 assert(lock != NULL, "Need non-NULL lock");
186 if (lock->owned_by_self()) return;
187 fatal("must own lock %s", lock->name());
188 }
189
190 void assert_locked_or_safepoint_or_handshake(const Mutex* lock, const JavaThread* thread) {
191 if (Thread::current()->is_VM_thread() && thread->is_vmthread_processing_handshake()) return;
192 assert_locked_or_safepoint(lock);
193 }
194 #endif
195
// Helper macro for mutex_init(): constructs a lock of class 'type' with the
// given rank ('pri') and safepoint-check mode, stores it in the global
// variable 'var' (using #var as the lock's printable name), and registers it
// in _mutex_array, asserting the array capacity is not exceeded.
#define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
  var = new type(Mutex::pri, #var, vm_block, Mutex::safepoint_check_allowed); \
  assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
  _mutex_array[_num_mutex++] = var; \
}
201
202 // Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
203 void mutex_init() {
204 def(tty_lock , PaddedMutex , tty, true, _safepoint_check_never); // allow to lock in VM
205
206 def(CGC_lock , PaddedMonitor, special, true, _safepoint_check_never); // coordinate between fore- and background GC
207 def(STS_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
208
209 def(FullGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_never); // in support of ExplicitGCInvokesConcurrent
210 if (UseG1GC) {
211 def(G1OldGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_always);
|
171 fatal("must own lock %s", lock->name());
172 }
173
174 // a weaker assertion than the above
175 void assert_locked_or_safepoint_weak(const Mutex* lock) {
176 assert(lock != NULL, "Need non-NULL lock");
177 if (lock->is_locked()) return;
178 if (SafepointSynchronize::is_at_safepoint()) return;
179 if (!Universe::is_fully_initialized()) return;
180 fatal("must own lock %s", lock->name());
181 }
182
183 // a stronger assertion than the above
184 void assert_lock_strong(const Mutex* lock) {
185 assert(lock != NULL, "Need non-NULL lock");
186 if (lock->owned_by_self()) return;
187 fatal("must own lock %s", lock->name());
188 }
189
// Like assert_locked_or_safepoint, but additionally allows access while
// 'thread' is being handshaked by the current thread.
void assert_locked_or_safepoint_or_handshake(const Mutex* lock, const JavaThread* thread) {
  // Presumably the active handshaker has exclusive, safe access to the
  // handshakee's state, equivalent to holding the lock or being at a
  // safepoint -- TODO confirm against the handshake implementation.
  if (Thread::current() == thread->active_handshaker()) return;
  assert_locked_or_safepoint(lock);
}
194 #endif
195
// Helper macro for mutex_init(): constructs a lock of class 'type' with the
// given rank ('pri') and safepoint-check mode, stores it in the global
// variable 'var' (using #var as the lock's printable name), and registers it
// in _mutex_array, asserting the array capacity is not exceeded.
#define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
  var = new type(Mutex::pri, #var, vm_block, Mutex::safepoint_check_allowed); \
  assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
  _mutex_array[_num_mutex++] = var; \
}
201
202 // Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
203 void mutex_init() {
204 def(tty_lock , PaddedMutex , tty, true, _safepoint_check_never); // allow to lock in VM
205
206 def(CGC_lock , PaddedMonitor, special, true, _safepoint_check_never); // coordinate between fore- and background GC
207 def(STS_lock , PaddedMonitor, leaf, true, _safepoint_check_never);
208
209 def(FullGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_never); // in support of ExplicitGCInvokesConcurrent
210 if (UseG1GC) {
211 def(G1OldGCCount_lock , PaddedMonitor, leaf, true, _safepoint_check_always);
|