// Lock guarding the ClassLoaderDataGraph; NULL until created during VM
// startup (presumably via the def() macro in mutex_init() — confirm there).
Mutex* ClassLoaderDataGraph_lock = NULL;

// Registry of every Mutex/Monitor created through the def() macro below.
// _num_mutex is the count of registered locks; def() asserts it stays
// below MAX_NUM_MUTEX.
#define MAX_NUM_MUTEX 128
static Monitor * _mutex_array[MAX_NUM_MUTEX];
static int _num_mutex;
154
155 #ifdef ASSERT
156 void assert_locked_or_safepoint(const Monitor * lock) {
157 // check if this thread owns the lock (common case)
158 if (IgnoreLockingAssertions) return;
159 assert(lock != NULL, "Need non-NULL lock");
160 if (lock->owned_by_self()) return;
161 if (SafepointSynchronize::is_at_safepoint()) return;
162 if (!Universe::is_fully_initialized()) return;
163 // see if invoker of VM operation owns it
164 VM_Operation* op = VMThread::vm_operation();
165 if (op != NULL && op->calling_thread() == lock->owner()) return;
166 fatal("must own lock %s", lock->name());
167 }
168
169 // a stronger assertion than the above
170 void assert_lock_strong(const Monitor * lock) {
171 if (IgnoreLockingAssertions) return;
172 assert(lock != NULL, "Need non-NULL lock");
173 if (lock->owned_by_self()) return;
174 fatal("must own lock %s", lock->name());
175 }
176 #endif
177
// Allocate a lock of the given 'type' at rank 'pri', bind it to the global
// variable 'var' (whose identifier also becomes the lock's name via #var),
// and record it in _mutex_array, asserting the array does not overflow.
#define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
var = new type(Mutex::pri, #var, vm_block, safepoint_check_allowed); \
assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
_mutex_array[_num_mutex++] = var; \
}
183
184 // Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
185 void mutex_init() {
186 def(tty_lock , PaddedMutex , event, true, Monitor::_safepoint_check_never); // allow to lock in VM
187
|
// Lock guarding the ClassLoaderDataGraph; NULL until created during VM
// startup (presumably via the def() macro in mutex_init() — confirm there).
Mutex* ClassLoaderDataGraph_lock = NULL;

// Registry of every Mutex/Monitor created through the def() macro below.
// _num_mutex is the count of registered locks; def() asserts it stays
// below MAX_NUM_MUTEX.
#define MAX_NUM_MUTEX 128
static Monitor * _mutex_array[MAX_NUM_MUTEX];
static int _num_mutex;
154
155 #ifdef ASSERT
156 void assert_locked_or_safepoint(const Monitor * lock) {
157 // check if this thread owns the lock (common case)
158 if (IgnoreLockingAssertions) return;
159 assert(lock != NULL, "Need non-NULL lock");
160 if (lock->owned_by_self()) return;
161 if (SafepointSynchronize::is_at_safepoint()) return;
162 if (!Universe::is_fully_initialized()) return;
163 // see if invoker of VM operation owns it
164 VM_Operation* op = VMThread::vm_operation();
165 if (op != NULL && op->calling_thread() == lock->owner()) return;
166 fatal("must own lock %s", lock->name());
167 }
168
169 // a weaker assertion than the above
170 void assert_locked_or_safepoint_weak(const Monitor * lock) {
171 if (IgnoreLockingAssertions) return;
172 assert(lock != NULL, "Need non-NULL lock");
173 if (lock->is_locked()) return;
174 if (SafepointSynchronize::is_at_safepoint()) return;
175 if (!Universe::is_fully_initialized()) return;
176 fatal("must own lock %s", lock->name());
177 }
178
179 // a stronger assertion than the above
180 void assert_lock_strong(const Monitor * lock) {
181 if (IgnoreLockingAssertions) return;
182 assert(lock != NULL, "Need non-NULL lock");
183 if (lock->owned_by_self()) return;
184 fatal("must own lock %s", lock->name());
185 }
186 #endif
187
// Allocate a lock of the given 'type' at rank 'pri', bind it to the global
// variable 'var' (whose identifier also becomes the lock's name via #var),
// and record it in _mutex_array, asserting the array does not overflow.
#define def(var, type, pri, vm_block, safepoint_check_allowed ) { \
var = new type(Mutex::pri, #var, vm_block, safepoint_check_allowed); \
assert(_num_mutex < MAX_NUM_MUTEX, "increase MAX_NUM_MUTEX"); \
_mutex_array[_num_mutex++] = var; \
}
193
194 // Using Padded subclasses to prevent false sharing of these global monitors and mutexes.
195 void mutex_init() {
196 def(tty_lock , PaddedMutex , event, true, Monitor::_safepoint_check_never); // allow to lock in VM
197
|