// Per-stripe spin locks used while threads contend during monitor inflation.
// NOTE(review): the use sites are outside this excerpt -- confirm the striping
// scheme at the points where gInflationLocks is indexed.
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount = 0;   // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

// Forward declaration: posts a JavaMonitorInflate event (definition appears
// later in the file, outside this excerpt).
static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

// Sentinel (all bits set, cast to an oop) used as a list/chain marker.
// NOTE(review): the chains that use this marker are not visible in this
// excerpt -- verify at the use sites.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
132
133
134 // =====================> Quick functions
135
136 // The quick_* forms are special fast-path variants used to improve
137 // performance. In the simplest case, a "quick_*" implementation could
138 // simply return false, in which case the caller will perform the necessary
139 // state transitions and call the slow-path form.
140 // The fast-path is designed to handle frequently arising cases in an efficient
141 // manner and is just a degenerate "optimistic" variant of the slow-path.
142 // returns true -- to indicate the call was satisfied.
143 // returns false -- to indicate the call needs the services of the slow-path.
144 // A no-loitering ordinance is in effect for code in the quick_* family
145 // operators: safepoints or indefinite blocking (blocking that might span a
146 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
147 // entry.
148 //
149 // Consider: An interesting optimization is to have the JIT recognize the
150 // following common idiom:
151 // synchronized (someobj) { .... ; notify(); }
152 // That is, we find a notify() or notifyAll() call that immediately precedes
153 // the monitorexit operation. In that case the JIT could fuse the operations
154 // into a single notifyAndExit() runtime primitive.
155
// Fast-path notify/notifyAll. Must not safepoint or block (no-loitering
// ordinance above). Returns true when the notify was fully handled here;
// false sends the caller to the slow path.
bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
  // NOTE(review): this excerpt elides lines here (the single-notify probe,
  // the waiter-transfer loop, and the closing braces of the blocks above).
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}
197
198
199 // The LockNode emitted directly at the synchronization site would have
200 // been too big if it were to have included support for the cases of inflated
201 // recursive enter and exit, so they go here instead.
202 // Note that we can't safely call AsyncPrintJavaStack() from within
203 // quick_enter() as our thread state remains _in_Java.
204
// Fast-path monitor enter, called with thread state still _in_Java.
// Returns true if the lock was acquired here; false reverts to the slow path.
bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      // Recursive enter on an inflated monitor we already own: bump the
      // recursion count -- no atomics needed, we are the owner.
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
  // NOTE(review): this excerpt elides lines here (the displaced-header store,
  // the owner-CAS attempt, and the closing brace of the has_monitor() block).

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;  // revert to slow-path
}
257
258 // -----------------------------------------------------------------------------
259 // Fast Monitor Enter/Exit
260 // This the fast monitor enter. The interpreter and compiler use
261 // some assembly copies of this code. Make sure update those code
262 // if the following function is changed. The implementation is
263 // extremely sensitive to race condition. Be careful.
264
// Fast monitor enter. Revokes/rebiases a biased header first (rebias only
// outside a safepoint), then falls through to slow_enter() for the actual
// stack-lock/inflation work. Kept byte-for-byte: the surrounding comments
// note that assembly copies exist and the code is race-sensitive.
void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      // Mutator thread: revocation may rebias toward the current thread.
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        // Rebias succeeded -- this thread now owns the bias; lock acquired.
        return;
      }
    } else {
      // At a safepoint (VM thread): plain revocation only.
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
282
// Fast monitor exit: a NULL displaced header means this was a recursive
// enter (diagnostics only); otherwise try to swing the displaced header
// back into the object via CAS, falling back to inflate-and-exit.
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
  // NOTE(review): this excerpt elides lines here (rest of the diagnostics,
  // the matching #endif, and the preamble of the non-recursive exit path).
  // swing the displaced header from the BasicLock back to the mark.
  assert(dhw->is_neutral(), "invariant");
  if (object->cas_set_mark(dhw, mark) == mark) {
    TEVENT(fast_exit: release stack-lock);
    return;
  }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}
334
335 // -----------------------------------------------------------------------------
336 // Interpreter/Compiler Slow Case
337 // This routine is used to handle interpreter/compiler slow case
338 // We don't need to use fast path here, because it must have been
339 // failed in the interpreter/compiler code.
// Slow-path enter: try to install a stack-lock via CAS on a neutral header,
// recognize a recursive stack-lock by this thread, otherwise fall through
// to inflation (the inflation tail is elided in this excerpt).
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    // Recursive stack-lock: record the recursion by storing a NULL
    // displaced header; fast_exit() treats NULL as "recursive exit".
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }
  // NOTE(review): this excerpt elides the inflation tail of this function.
360
// failed in the interpreter/compiler code. Simply use the heavy
// weight monitor should be ok, unless someone find otherwise.
// Thin wrapper: the slow exit path is identical to fast_exit(), which
// already handles recursive, stack-locked and inflated cases.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}
378
379 // -----------------------------------------------------------------------------
380 // Class Loader support to workaround deadlocks on the class loader lock objects
381 // Also used by GC
382 // complete_exit()/reenter() are used to wait on a nested lock
383 // i.e. to give up an outer lock completely and then re-enter
384 // Used when holding nested locks - lock acquisition order: lock1 then lock2
385 // 1) complete_exit lock1 - saving recursion count
386 // 2) wait on lock2
387 // 3) when notified on lock2, unlock lock2
388 // 4) reenter lock1 with original recursion count
389 // 5) lock lock2
390 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
391 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
392 TEVENT(complete_exit);
393 if (UseBiasedLocking) {
394 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
395 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
396 }
397
398 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
399 obj(),
400 inflate_cause_vm_internal);
401
402 return monitor->complete_exit(THREAD);
403 }
404
405 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
406 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
407 TEVENT(reenter);
408 if (UseBiasedLocking) {
409 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
410 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
411 }
412
413 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
414 obj(),
415 inflate_cause_vm_internal);
416
417 monitor->reenter(recursion, THREAD);
418 }
419 // -----------------------------------------------------------------------------
420 // JNI locks on java objects
421 // NOTE: must use heavy weight monitor to handle jni monitor enter
422 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
423 // the current locking is from JNI instead of Java code
424 TEVENT(jni_enter);
425 if (UseBiasedLocking) {
426 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
427 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
428 }
429 THREAD->set_current_pending_monitor_is_from_java(false);
430 ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
431 THREAD->set_current_pending_monitor_is_from_java(true);
432 }
433
434 // NOTE: must use heavy weight monitor to handle jni monitor exit
435 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
436 TEVENT(jni_exit);
437 if (UseBiasedLocking) {
438 Handle h_obj(THREAD, obj);
439 BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
440 obj = h_obj();
441 }
442 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
443
444 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
445 obj,
446 inflate_cause_jni_exit);
447 // If this thread has locked the object, exit the monitor. Note: can't use
448 // monitor->check(CHECK); must exit even if an exception is pending.
449 if (monitor->check(THREAD)) {
450 monitor->exit(true, THREAD);
451 }
452 }
453
454 // -----------------------------------------------------------------------------
455 // Internal VM locks on java objects
456 // standard constructor, allows locking failures
461 _obj = obj;
462
463 if (_dolock) {
464 TEVENT(ObjectLocker);
465
466 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
467 }
468 }
469
// RAII release: undo the fast_enter() performed by the constructor,
// but only if the constructor actually took the lock.
ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
475
476
477 // -----------------------------------------------------------------------------
478 // Wait/Notify/NotifyAll
479 // NOTE: must use heavy weight monitor to handle wait()
480 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
481 if (UseBiasedLocking) {
482 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
483 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
484 }
485 if (millis < 0) {
486 TEVENT(wait - throw IAX);
487 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
488 }
489 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
490 obj(),
491 inflate_cause_wait);
492
493 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
494 monitor->wait(millis, true, THREAD);
495
496 // This dummy call is in place to get around dtrace bug 6254741. Once
497 // that's fixed we can uncomment the following line, remove the call
498 // and change this function back into a "void" func.
499 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
500 return dtrace_waited_probe(monitor, obj, THREAD);
501 }
502
503 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
504 if (UseBiasedLocking) {
505 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
506 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
507 }
508 if (millis < 0) {
509 TEVENT(wait - throw IAX);
510 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
511 }
512 ObjectSynchronizer::inflate(THREAD,
513 obj(),
514 inflate_cause_wait)->wait(millis, false, THREAD);
515 }
516
517 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
518 if (UseBiasedLocking) {
519 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
520 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
521 }
522
523 markOop mark = obj->mark();
524 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
525 return;
526 }
527 ObjectSynchronizer::inflate(THREAD,
528 obj(),
529 inflate_cause_notify)->notify(THREAD);
530 }
531
532 // NOTE: see comment of notify()
533 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
534 if (UseBiasedLocking) {
535 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
536 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
537 }
538
539 markOop mark = obj->mark();
540 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
541 return;
542 }
543 ObjectSynchronizer::inflate(THREAD,
544 obj(),
545 inflate_cause_notify)->notifyAll(THREAD);
546 }
547
548 // -----------------------------------------------------------------------------
549 // Hash Code handling
550 //
551 // Performance concern:
552 // OrderAccess::storestore() calls release() which at one time stored 0
553 // into the global volatile OrderAccess::dummy variable. This store was
692 // likely make this the default in future releases.
693 unsigned t = Self->_hashStateX;
694 t ^= (t << 11);
695 Self->_hashStateX = Self->_hashStateY;
696 Self->_hashStateY = Self->_hashStateZ;
697 Self->_hashStateZ = Self->_hashStateW;
698 unsigned v = Self->_hashStateW;
699 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
700 Self->_hashStateW = v;
701 value = v;
702 }
703
704 value &= markOopDesc::hash_mask;
705 if (value == 0) value = 0xBAD;
706 assert(value != markOopDesc::no_hash, "invariant");
707 TEVENT(hashCode: GENERATE);
708 return value;
709 }
710
// Compute (and, if necessary, install) the identity hash code for obj.
// The middle of this function is elided in this excerpt; only the bias
// revocation prologue and the monitor-header install tail are visible.
intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }
  // NOTE(review): this excerpt elides lines here (declarations of mark/hash/
  // temp/test/monitor and the earlier header-state cases of the hash lookup).
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    // No hash installed yet: generate one and CAS it into the header
    // stored in the monitor.
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash);  // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is install the hash code. If someone add new usage of
      // displaced header, please update this code
      hash = test->hash();  // a racing thread installed its hash first
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}
815
// Deprecated -- use FastHashCode() instead.

// Convenience wrapper: identity hash for a handleized oop, computed on the
// current thread.
intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}
821
822
// Answers whether 'thread' (which must be the current thread) holds the
// lock on h_obj, considering stack-locked and inflated states. The final
// unlocked-case return is elided in this excerpt.
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  // NOTE(review): this excerpt elides the final unlocked-case return.
1365 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1366 markOop mark = obj->mark();
1367 if (mark->has_monitor()) {
1368 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1369 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1370 return mark->monitor();
1371 }
1372 return ObjectSynchronizer::inflate(Thread::current(),
1373 obj,
1374 inflate_cause_vm_internal);
1375 }
1376
1377 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1378 oop object,
1379 const InflateCause cause) {
1380
1381 // Inflate mutates the heap ...
1382 // Relaxing assertion for bug 6320749.
1383 assert(Universe::verify_in_progress() ||
1384 !SafepointSynchronize::is_at_safepoint(), "invariant");
1385
1386 EventJavaMonitorInflate event;
1387
1388 for (;;) {
1389 const markOop mark = object->mark();
1390 assert(!mark->has_bias_pattern(), "invariant");
1391
1392 // The mark can be in one of the following states:
1393 // * Inflated - just return
1394 // * Stack-locked - coerce it to inflated
1395 // * INFLATING - busy wait for conversion to complete
1396 // * Neutral - aggressively inflate the object.
1397 // * BIASED - Illegal. We should never see this
1398
1399 // CASE: inflated
1400 if (mark->has_monitor()) {
1401 ObjectMonitor * inf = mark->monitor();
1402 assert(inf->header()->is_neutral(), "invariant");
1403 assert(inf->object() == object, "invariant");
1404 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
|
// Per-stripe spin locks used while threads contend during monitor inflation.
// NOTE(review): the use sites are outside this excerpt -- confirm the striping
// scheme at the points where gInflationLocks is indexed.
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount = 0;   // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

// Forward declaration: posts a JavaMonitorInflate event (definition appears
// later in the file, outside this excerpt).
static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

// Valhalla: objects whose mark is "always locked" (value types) cannot be
// synchronized on. This variant throws IllegalMonitorStateException (with
// the klass name) from a void-returning caller.
#define CHECK_THROW_NOSYNC_IMSE(obj)  \
  if ((obj)->mark()->is_always_locked()) {  \
    ResourceMark rm(THREAD);                \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

// As above, but for callers that return a value (returns 0 on throw).
#define CHECK_THROW_NOSYNC_IMSE_0(obj)  \
  if ((obj)->mark()->is_always_locked()) {  \
    ResourceMark rm(THREAD);                  \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }


// Sentinel (all bits set, cast to an oop) used as a list/chain marker.
// NOTE(review): the chains that use this marker are not visible in this
// excerpt -- verify at the use sites.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
146
147 // =====================> Quick functions
148
149 // The quick_* forms are special fast-path variants used to improve
150 // performance. In the simplest case, a "quick_*" implementation could
151 // simply return false, in which case the caller will perform the necessary
152 // state transitions and call the slow-path form.
153 // The fast-path is designed to handle frequently arising cases in an efficient
154 // manner and is just a degenerate "optimistic" variant of the slow-path.
155 // returns true -- to indicate the call was satisfied.
156 // returns false -- to indicate the call needs the services of the slow-path.
157 // A no-loitering ordinance is in effect for code in the quick_* family
158 // operators: safepoints or indefinite blocking (blocking that might span a
159 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
160 // entry.
161 //
162 // Consider: An interesting optimization is to have the JIT recognize the
163 // following common idiom:
164 // synchronized (someobj) { .... ; notify(); }
165 // That is, we find a notify() or notifyAll() call that immediately precedes
166 // the monitorexit operation. In that case the JIT could fuse the operations
167 // into a single notifyAndExit() runtime primitive.
168
// Fast-path notify/notifyAll (Valhalla variant: value types may never reach
// here). Must not safepoint or block. Returns true when fully handled here;
// false sends the caller to the slow path.
bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
  // NOTE(review): this excerpt elides lines here (the single-notify probe,
  // the waiter-transfer loop, and the closing braces of the blocks above).
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}
211
212
213 // The LockNode emitted directly at the synchronization site would have
214 // been too big if it were to have included support for the cases of inflated
215 // recursive enter and exit, so they go here instead.
216 // Note that we can't safely call AsyncPrintJavaStack() from within
217 // quick_enter() as our thread state remains _in_Java.
218
// Fast-path monitor enter (Valhalla variant), called with thread state still
// _in_Java. Returns true if the lock was acquired here; false reverts to the
// slow path.
bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // Need to throw NPE
  assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      // Recursive enter on an inflated monitor we already own: bump the
      // recursion count -- no atomics needed, we are the owner.
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
  // NOTE(review): this excerpt elides lines here (the displaced-header store,
  // the owner-CAS attempt, and the closing brace of the has_monitor() block).

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;  // revert to slow-path
}
272
273 // -----------------------------------------------------------------------------
274 // Fast Monitor Enter/Exit
275 // This the fast monitor enter. The interpreter and compiler use
276 // some assembly copies of this code. Make sure update those code
277 // if the following function is changed. The implementation is
278 // extremely sensitive to race condition. Be careful.
279
// Fast monitor enter (Valhalla variant): reject value types first, revoke or
// rebias a biased header, then fall through to slow_enter(). Kept
// byte-for-byte: assembly copies exist and the code is race-sensitive.
void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE(obj);  // value types cannot be synchronized on
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      // Mutator thread: revocation may rebias toward the current thread.
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        // Rebias succeeded -- this thread now owns the bias; lock acquired.
        return;
      }
    } else {
      // At a safepoint (VM thread): plain revocation only.
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
298
// Fast monitor exit (Valhalla variant): an always-locked (value type) header
// is a no-op here; otherwise handle recursive exits, try to CAS the displaced
// header back, and fall back to inflate-and-exit.
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  if (EnableValhalla && mark->is_always_locked()) {
    return;  // value types are never actually locked; nothing to release
  }
  assert(!EnableValhalla || !object->klass()->is_value(), "monitor op on value type");
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
  // NOTE(review): this excerpt elides lines here (rest of the diagnostics,
  // the matching #endif, and the preamble of the non-recursive exit path).
  // swing the displaced header from the BasicLock back to the mark.
  assert(dhw->is_neutral(), "invariant");
  if (object->cas_set_mark(dhw, mark) == mark) {
    TEVENT(fast_exit: release stack-lock);
    return;
  }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}
354
355 // -----------------------------------------------------------------------------
356 // Interpreter/Compiler Slow Case
357 // This routine is used to handle interpreter/compiler slow case
358 // We don't need to use fast path here, because it must have been
359 // failed in the interpreter/compiler code.
// Slow-path enter (Valhalla variant): reject value types, try a stack-lock
// CAS on a neutral header, recognize a recursive stack-lock, otherwise fall
// through to inflation (the inflation tail is elided in this excerpt).
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  CHECK_THROW_NOSYNC_IMSE(obj);  // value types cannot be synchronized on
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    // Recursive stack-lock: record the recursion by storing a NULL
    // displaced header; fast_exit() treats NULL as "recursive exit".
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }
  // NOTE(review): this excerpt elides the inflation tail of this function.
381
// failed in the interpreter/compiler code. Simply use the heavy
// weight monitor should be ok, unless someone find otherwise.
// Thin wrapper: the slow exit path is identical to fast_exit(), which
// already handles recursive, stack-locked and inflated cases.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}
399
400 // -----------------------------------------------------------------------------
401 // Class Loader support to workaround deadlocks on the class loader lock objects
402 // Also used by GC
403 // complete_exit()/reenter() are used to wait on a nested lock
404 // i.e. to give up an outer lock completely and then re-enter
405 // Used when holding nested locks - lock acquisition order: lock1 then lock2
406 // 1) complete_exit lock1 - saving recursion count
407 // 2) wait on lock2
408 // 3) when notified on lock2, unlock lock2
409 // 4) reenter lock1 with original recursion count
410 // 5) lock lock2
411 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
412 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
413 TEVENT(complete_exit);
414 assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
415 if (UseBiasedLocking) {
416 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
417 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
418 }
419
420 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
421 obj(),
422 inflate_cause_vm_internal);
423
424 return monitor->complete_exit(THREAD);
425 }
426
427 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
428 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
429 TEVENT(reenter);
430 assert(!EnableValhalla || !obj->klass()->is_value(), "monitor op on value type");
431 if (UseBiasedLocking) {
432 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
433 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
434 }
435
436 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
437 obj(),
438 inflate_cause_vm_internal);
439
440 monitor->reenter(recursion, THREAD);
441 }
442 // -----------------------------------------------------------------------------
443 // JNI locks on java objects
444 // NOTE: must use heavy weight monitor to handle jni monitor enter
445 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
446 // the current locking is from JNI instead of Java code
447 TEVENT(jni_enter);
448 CHECK_THROW_NOSYNC_IMSE(obj);
449 if (UseBiasedLocking) {
450 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
451 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
452 }
453 THREAD->set_current_pending_monitor_is_from_java(false);
454 ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
455 THREAD->set_current_pending_monitor_is_from_java(true);
456 }
457
458 // NOTE: must use heavy weight monitor to handle jni monitor exit
459 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
460 TEVENT(jni_exit);
461 CHECK_THROW_NOSYNC_IMSE(obj);
462 if (UseBiasedLocking) {
463 Handle h_obj(THREAD, obj);
464 BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
465 obj = h_obj();
466 }
467 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
468
469 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
470 obj,
471 inflate_cause_jni_exit);
472 // If this thread has locked the object, exit the monitor. Note: can't use
473 // monitor->check(CHECK); must exit even if an exception is pending.
474 if (monitor->check(THREAD)) {
475 monitor->exit(true, THREAD);
476 }
477 }
478
479 // -----------------------------------------------------------------------------
480 // Internal VM locks on java objects
481 // standard constructor, allows locking failures
486 _obj = obj;
487
488 if (_dolock) {
489 TEVENT(ObjectLocker);
490
491 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
492 }
493 }
494
495 ObjectLocker::~ObjectLocker() {
496 if (_dolock) {
497 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
498 }
499 }
500
501
502 // -----------------------------------------------------------------------------
503 // Wait/Notify/NotifyAll
504 // NOTE: must use heavy weight monitor to handle wait()
505 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
506 CHECK_THROW_NOSYNC_IMSE_0(obj);
507 if (UseBiasedLocking) {
508 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
509 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
510 }
511 if (millis < 0) {
512 TEVENT(wait - throw IAX);
513 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
514 }
515 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
516 obj(),
517 inflate_cause_wait);
518
519 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
520 monitor->wait(millis, true, THREAD);
521
522 // This dummy call is in place to get around dtrace bug 6254741. Once
523 // that's fixed we can uncomment the following line, remove the call
524 // and change this function back into a "void" func.
525 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
526 return dtrace_waited_probe(monitor, obj, THREAD);
527 }
528
529 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
530 CHECK_THROW_NOSYNC_IMSE(obj);
531 if (UseBiasedLocking) {
532 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
533 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
534 }
535 if (millis < 0) {
536 TEVENT(wait - throw IAX);
537 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
538 }
539 ObjectSynchronizer::inflate(THREAD,
540 obj(),
541 inflate_cause_wait)->wait(millis, false, THREAD);
542 }
543
544 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
545 CHECK_THROW_NOSYNC_IMSE(obj);
546 if (UseBiasedLocking) {
547 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
548 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
549 }
550
551 markOop mark = obj->mark();
552 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
553 return;
554 }
555 ObjectSynchronizer::inflate(THREAD,
556 obj(),
557 inflate_cause_notify)->notify(THREAD);
558 }
559
560 // NOTE: see comment of notify()
561 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
562 CHECK_THROW_NOSYNC_IMSE(obj);
563 if (UseBiasedLocking) {
564 BiasedLocking::revoke_and_rebias(obj, false, THREAD);
565 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
566 }
567
568 markOop mark = obj->mark();
569 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
570 return;
571 }
572 ObjectSynchronizer::inflate(THREAD,
573 obj(),
574 inflate_cause_notify)->notifyAll(THREAD);
575 }
576
577 // -----------------------------------------------------------------------------
578 // Hash Code handling
579 //
580 // Performance concern:
581 // OrderAccess::storestore() calls release() which at one time stored 0
582 // into the global volatile OrderAccess::dummy variable. This store was
721 // likely make this the default in future releases.
722 unsigned t = Self->_hashStateX;
723 t ^= (t << 11);
724 Self->_hashStateX = Self->_hashStateY;
725 Self->_hashStateY = Self->_hashStateZ;
726 Self->_hashStateZ = Self->_hashStateW;
727 unsigned v = Self->_hashStateW;
728 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
729 Self->_hashStateW = v;
730 value = v;
731 }
732
733 value &= markOopDesc::hash_mask;
734 if (value == 0) value = 0xBAD;
735 assert(value != markOopDesc::no_hash, "invariant");
736 TEVENT(hashCode: GENERATE);
737 return value;
738 }
739
740 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
741 if (EnableValhalla && obj->klass()->is_value()) {
742 // Expected tooling to override hashCode for value type, just don't crash
743 if (log_is_enabled(Debug, monitorinflation)) {
744 ResourceMark rm;
745 log_debug(monitorinflation)("FastHashCode for value type: %s", obj->klass()->external_name());
746 }
747 return obj->klass()->java_mirror()->identity_hash();
748 }
749 if (UseBiasedLocking) {
750 // NOTE: many places throughout the JVM do not expect a safepoint
751 // to be taken here, in particular most operations on perm gen
752 // objects. However, we only ever bias Java instances and all of
753 // the call sites of identity_hash that might revoke biases have
754 // been checked to make sure they can handle a safepoint. The
755 // added check of the bias pattern is to avoid useless calls to
756 // thread-local storage.
757 if (obj->mark()->has_bias_pattern()) {
758 // Handle for oop obj in case of STW safepoint
759 Handle hobj(Self, obj);
760 // Relaxing assertion for bug 6320749.
761 assert(Universe::verify_in_progress() ||
762 !SafepointSynchronize::is_at_safepoint(),
763 "biases should not be seen by VM thread here");
764 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
765 obj = hobj();
766 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
767 }
768 }
833 assert(mark->is_neutral(), "invariant");
834 hash = mark->hash();
835 if (hash == 0) {
836 hash = get_next_hash(Self, obj);
837 temp = mark->copy_set_hash(hash); // merge hash code into header
838 assert(temp->is_neutral(), "invariant");
839 test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
840 if (test != mark) {
841 // The only update to the header in the monitor (outside GC)
842 // is install the hash code. If someone add new usage of
843 // displaced header, please update this code
844 hash = test->hash();
845 assert(test->is_neutral(), "invariant");
846 assert(hash != 0, "Trivial unexpected object/monitor header usage.");
847 }
848 }
849 // We finally get the hash
850 return hash;
851 }
852
853
854 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
855 Handle h_obj) {
856 if (EnableValhalla && h_obj->mark()->is_always_locked()) {
857 return false;
858 }
859 if (UseBiasedLocking) {
860 BiasedLocking::revoke_and_rebias(h_obj, false, thread);
861 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
862 }
863
864 assert(thread == JavaThread::current(), "Can only be called on current thread");
865 oop obj = h_obj();
866
867 markOop mark = ReadStableMark(obj);
868
869 // Uncontended case, header points to stack
870 if (mark->has_locker()) {
871 return thread->is_lock_owned((address)mark->locker());
872 }
873 // Contended case, header points to ObjectMonitor (tagged pointer)
874 if (mark->has_monitor()) {
875 ObjectMonitor* monitor = mark->monitor();
876 return monitor->is_entered(thread) != 0;
877 }
878 // Unlocked case, header in place
1399 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1400 markOop mark = obj->mark();
1401 if (mark->has_monitor()) {
1402 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1403 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
1404 return mark->monitor();
1405 }
1406 return ObjectSynchronizer::inflate(Thread::current(),
1407 obj,
1408 inflate_cause_vm_internal);
1409 }
1410
1411 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1412 oop object,
1413 const InflateCause cause) {
1414
1415 // Inflate mutates the heap ...
1416 // Relaxing assertion for bug 6320749.
1417 assert(Universe::verify_in_progress() ||
1418 !SafepointSynchronize::is_at_safepoint(), "invariant");
1419
1420 if (EnableValhalla) {
1421 guarantee(!object->klass()->is_value(), "Attempt to inflate value type");
1422 }
1423
1424 EventJavaMonitorInflate event;
1425
1426 for (;;) {
1427 const markOop mark = object->mark();
1428 assert(!mark->has_bias_pattern(), "invariant");
1429
1430 // The mark can be in one of the following states:
1431 // * Inflated - just return
1432 // * Stack-locked - coerce it to inflated
1433 // * INFLATING - busy wait for conversion to complete
1434 // * Neutral - aggressively inflate the object.
1435 // * BIASED - Illegal. We should never see this
1436
1437 // CASE: inflated
1438 if (mark->has_monitor()) {
1439 ObjectMonitor * inf = mark->monitor();
1440 assert(inf->header()->is_neutral(), "invariant");
1441 assert(inf->object() == object, "invariant");
1442 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
|