20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "logging/log.hpp"
28 #include "logging/logStream.hpp"
29 #include "jfr/jfrEvents.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/metaspaceShared.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/interfaceSupport.inline.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "runtime/objectMonitor.hpp"
43 #include "runtime/objectMonitor.inline.hpp"
44 #include "runtime/osThread.hpp"
45 #include "runtime/safepointVerifiers.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/stubRoutines.hpp"
48 #include "runtime/synchronizer.hpp"
49 #include "runtime/thread.inline.hpp"
50 #include "runtime/timer.hpp"
51 #include "runtime/vframe.hpp"
52 #include "runtime/vmThread.hpp"
53 #include "utilities/align.hpp"
54 #include "utilities/dtrace.hpp"
55 #include "utilities/events.hpp"
56 #include "utilities/preserveException.hpp"
57
58 // The "core" versions of monitor enter and exit reside in this file.
59 // The interpreter and compilers contain specialized transliterated
60 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
61 // for instance. If you make changes here, make sure to modify the
62 // interpreter, and both C1 and C2 fast-path inline locking code emission.
63 //
64 // -----------------------------------------------------------------------------
101 }
102
103 #else // ndef DTRACE_ENABLED
104
105 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
106 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
107
108 #endif // ndef DTRACE_ENABLED
109
110 // This exists only as a workaround for dtrace bug 6254741
111 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
112 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
113 return 0;
114 }
115
116 #define NINFLATIONLOCKS 256
117 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
118
119 // global list of blocks of monitors
120 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
121
122 struct ObjectMonitorListGlobals {
123 char _pad_prefix[OM_CACHE_LINE_SIZE];
124   // These are highly shared list-related variables.
125 // To avoid false-sharing they need to be the sole occupants of a cache line.
126
127 // Global ObjectMonitor free list. Newly allocated and deflated
128 // ObjectMonitors are prepended here.
129 ObjectMonitor* _free_list;
130 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
131
132 // Global ObjectMonitor in-use list. When a JavaThread is exiting,
133 // ObjectMonitors on its per-thread in-use list are prepended here.
134 ObjectMonitor* _in_use_list;
135 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
136
137 int _free_count; // # on free_list
138 DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(int));
139
140 int _in_use_count; // # on in_use_list
141 DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
142
143 int _population; // # Extant -- in circulation
144 DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
145 };
146 static ObjectMonitorListGlobals om_list_globals;
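
// For reference: DEFINE_PAD_MINUS_SIZE(id, alignment, size) from
// memory/padded.hpp pads with (alignment - size) bytes, so each of the
// hot fields above occupies an OM_CACHE_LINE_SIZE line by itself.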
147
148 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
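
// CHAINMARKER is stored in the object field of a block's element [0] so
// block headers can be recognized during list walks; see om_alloc() and
// monitors_iterate() below.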
149
150
151 // =====================> Spin-lock functions
152
153 // ObjectMonitors are not lockable outside of this file. We use spin-locks
154 // implemented using a bit in the _next_om field instead of the heavier
155 // weight locking mechanisms for faster list management.
156
157 #define OM_LOCK_BIT 0x1
158
159 // Returns true if the ObjectMonitor is locked;
160 // otherwise returns false.
161 static bool is_locked(ObjectMonitor* om) {
162 return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
163 }
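
// A minimal sketch of the try-lock step, assuming a try_set_next_om()
// CAS helper on the _next_om field; the *_sketch name marks this as
// illustrative (the real locking helpers are elided from this excerpt):
static bool try_om_lock_sketch(ObjectMonitor* om) {
  // Read the next field and strip any lock bit to get the compare value:
  ObjectMonitor* next = (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
  ObjectMonitor* marked = (ObjectMonitor*)((intptr_t)next | OM_LOCK_BIT);
  // The CAS fails if another thread holds the lock (bit already set):
  return om->try_set_next_om(next, marked) == next;
}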
164
282 Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
283 break;
284 }
285 // Implied else: try it all again
286 }
287
288 // Second we handle om_list_globals._free_list:
289 prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
290 &om_list_globals._free_list, &om_list_globals._free_count);
291 }
292
293 // Prepend a list of ObjectMonitors to om_list_globals._free_list.
294 // 'tail' is the last ObjectMonitor in the list and there are 'count'
295 // on the list. Also updates om_list_globals._free_count.
296 static void prepend_list_to_global_free_list(ObjectMonitor* list,
297 ObjectMonitor* tail, int count) {
298 prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
299 &om_list_globals._free_count);
300 }
301
302 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
303 // 'tail' is the last ObjectMonitor in the list and there are 'count'
304 // on the list. Also updates om_list_globals._in_use_count.
305 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
306 ObjectMonitor* tail, int count) {
307 prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
308 &om_list_globals._in_use_count);
309 }
310
311 // Prepend an ObjectMonitor to the specified list. Also updates
312 // the specified counter.
313 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
314 int* count_p) {
315 while (true) {
316 om_lock(m); // Lock m so we can safely update its next field.
317 ObjectMonitor* cur = NULL;
318 // Lock the list head to guard against races with a list walker
319 // thread:
320 if ((cur = get_list_head_locked(list_p)) != NULL) {
321 // List head is now locked so we can safely switch it.
322 m->set_next_om(cur); // m now points to cur (and unlocks m)
323 Atomic::store(list_p, m); // Switch list head to unlocked m.
324 om_unlock(cur);
325 break;
326 }
327 // The list is empty so try to set the list head.
328 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
329 m->set_next_om(cur); // m now points to NULL (and unlocks m)
330 if (Atomic::cmpxchg(list_p, cur, m) == cur) {
331 // List head is now unlocked m.
332 break;
333 }
334 // Implied else: try it all again
335 }
336 Atomic::inc(count_p);
337 }
338
339 // Prepend an ObjectMonitor to a per-thread om_free_list.
340 // Also updates the per-thread om_free_count.
341 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
342 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
343 }
344
345 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
346 // Also updates the per-thread om_in_use_count.
347 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
348 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
349 }
350
351 // Take an ObjectMonitor from the start of the specified list. Also
352 // decrements the specified counter. Returns NULL if none are available.
353 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
354 int* count_p) {
355 ObjectMonitor* take = NULL;
356 // Lock the list head to guard against races with a list walker
357 // thread:
358 if ((take = get_list_head_locked(list_p)) == NULL) {
359 return NULL; // None are available.
360 }
361 ObjectMonitor* next = unmarked_next(take);
362 // Switch locked list head to next (which unlocks the list head, but
363 // leaves take locked):
364 Atomic::store(list_p, next);
365 Atomic::dec(count_p);
366 // Unlock take, but leave the next value for any lagging list
367 // walkers. It will get cleaned up when take is prepended to
368 // the in-use list:
369 om_unlock(take);
370 return take;
371 }
372
373 // Take an ObjectMonitor from the start of the om_list_globals._free_list.
374 // Also updates om_list_globals._free_count. Returns NULL if none are
375 // available.
376 static ObjectMonitor* take_from_start_of_global_free_list() {
377 return take_from_start_of_common(&om_list_globals._free_list,
446 }
447
448 // biased locking and any other IMS exception states take the slow-path
449 return false;
450 }
451
452
453 // The LockNode emitted directly at the synchronization site would have
454 // been too big if it were to have included support for the cases of inflated
455 // recursive enter and exit, so they go here instead.
456 // Note that we can't safely call AsyncPrintJavaStack() from within
457 // quick_enter() as our thread state remains _in_Java.
458
459 bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
460 BasicLock * lock) {
461 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
462 assert(self->is_Java_thread(), "invariant");
463 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
464 NoSafepointVerifier nsv;
465 if (obj == NULL) return false; // Need to throw NPE
466 const markWord mark = obj->mark();
467
468 if (mark.has_monitor()) {
469 ObjectMonitor* const m = mark.monitor();
470 assert(m->object() == obj, "invariant");
471 Thread* const owner = (Thread *) m->_owner;
472
473 // Lock contention and Transactional Lock Elision (TLE) diagnostics
474 // and observability
475 // Case: light contention possibly amenable to TLE
476 // Case: TLE inimical operations such as nested/recursive synchronization
477
478 if (owner == self) {
479 m->_recursions++;
480 return true;
481 }
482
483 // This Java Monitor is inflated so obj's header will never be
484 // displaced to this thread's BasicLock. Make the displaced header
485 // non-NULL so this BasicLock is not seen as recursive nor as
486 // being locked. We do this unconditionally so that this thread's
487 // BasicLock cannot be mis-interpreted by any stack walkers. For
488 // performance reasons, stack walkers generally first check for
489 // Biased Locking in the object's header, the second check is for
490 // stack-locking in the object's header, the third check is for
491 // recursive stack-locking in the displaced header in the BasicLock,
492 // and last are the inflated Java Monitor (ObjectMonitor) checks.
493 lock->set_displaced_header(markWord::unused_mark());
494
495 if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
496 assert(m->_recursions == 0, "invariant");
497 return true;
498 }
499 }
500
501 // Note that we could inflate in quick_enter.
502 // This is likely a useful optimization.
503 // Critically, in quick_enter() we must not:
504 // -- perform bias revocation, or
505 // -- block indefinitely, or
506 // -- reach a safepoint
507
508 return false; // revert to slow-path
509 }
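
// An illustrative sketch of the stack-walker check order described in the
// comment above; sketch_walker_sees_lock() is hypothetical (real stack
// walkers live elsewhere and also need the frame context for check #3):
static bool sketch_walker_sees_lock(markWord mark, BasicLock* lock) {
  if (mark.has_bias_pattern()) return true;                // 1: biased locking
  if (mark.has_locker()) return true;                      // 2: stack-locked
  if (lock->displaced_header().value() == 0) return true;  // 3: recursive stack-lock
  return mark.has_monitor();                               // 4: inflated monitor
}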
510
511 // -----------------------------------------------------------------------------
512 // Monitor Enter/Exit
513 // The interpreter and compiler assembly code tries to lock using the fast path
514 // of this algorithm. Make sure to update that code if the following function is
515 // changed. The implementation is extremely sensitive to race conditions. Be careful.
516
517 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
518 if (UseBiasedLocking) {
530 // Anticipate successful CAS -- the ST of the displaced mark must
531 // be visible <= the ST performed by the CAS.
532 lock->set_displaced_header(mark);
533 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
534 return;
535 }
536 // Fall through to inflate() ...
537 } else if (mark.has_locker() &&
538 THREAD->is_lock_owned((address)mark.locker())) {
539 assert(lock != mark.locker(), "must not re-lock the same lock");
540 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
541 lock->set_displaced_header(markWord::from_pointer(NULL));
542 return;
543 }
544
545 // The object header will never be displaced to this lock,
546 // so it does not matter what the value is, except that it
547 // must be non-zero to avoid looking like a re-entrant lock,
548 // and must not look locked either.
549 lock->set_displaced_header(markWord::unused_mark());
550 inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
551 }
552
553 void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
554 markWord mark = object->mark();
555 // We cannot check for Biased Locking if we are racing an inflation.
556 assert(mark == markWord::INFLATING() ||
557 !mark.has_bias_pattern(), "should not see bias pattern here");
558
559 markWord dhw = lock->displaced_header();
560 if (dhw.value() == 0) {
561 // If the displaced header is NULL, then this exit matches up with
562 // a recursive enter. No real work to do here except for diagnostics.
563 #ifndef PRODUCT
564 if (mark != markWord::INFLATING()) {
565 // Only do diagnostics if we are not racing an inflation. Simply
566 // exiting a recursive enter of a Java Monitor that is being
567 // inflated is safe; see the has_monitor() comment below.
568 assert(!mark.is_neutral(), "invariant");
569 assert(!mark.has_locker() ||
570 THREAD->is_lock_owned((address)mark.locker()), "invariant");
579 // does not own the Java Monitor.
580 ObjectMonitor* m = mark.monitor();
581 assert(((oop)(m->object()))->mark() == mark, "invariant");
582 assert(m->is_entered(THREAD), "invariant");
583 }
584 }
585 #endif
586 return;
587 }
588
589 if (mark == markWord::from_pointer(lock)) {
590 // If the object is stack-locked by the current thread, try to
591 // swing the displaced header from the BasicLock back to the mark.
592 assert(dhw.is_neutral(), "invariant");
593 if (object->cas_set_mark(dhw, mark) == mark) {
594 return;
595 }
596 }
597
598 // We have to take the slow-path of possible inflation and then exit.
599 inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
600 }
601
602 // -----------------------------------------------------------------------------
603 // Class Loader support to work around deadlocks on the class loader lock objects.
604 // Also used by GC.
605 // complete_exit()/reenter() are used to wait on a nested lock,
606 // i.e. to give up an outer lock completely and then re-enter it later.
607 // Used when holding nested locks - lock acquisition order: lock1 then lock2:
608 // 1) complete_exit lock1 - saving recursion count
609 // 2) wait on lock2
610 // 3) when notified on lock2, unlock lock2
611 // 4) reenter lock1 with original recursion count
612 // 5) lock lock2 (a sketch of this pattern follows reenter() below)
613 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
614 intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
615 if (UseBiasedLocking) {
616 BiasedLocking::revoke(obj, THREAD);
617 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
618 }
619
620 ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
621
622 return monitor->complete_exit(THREAD);
623 }
624
625 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
626 void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
627 if (UseBiasedLocking) {
628 BiasedLocking::revoke(obj, THREAD);
629 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
630 }
631
632 ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
633
634 monitor->reenter(recursions, THREAD);
635 }
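
// A sketch of the nested-lock pattern described above; this helper is
// illustrative only (not a HotSpot API) and assumes 'outer' is locked on
// entry; step 5 (re-locking 'inner') is omitted for brevity:
static void sketch_wait_with_outer_released(Handle outer, Handle inner, TRAPS) {
  intx recursions = ObjectSynchronizer::complete_exit(outer, THREAD);  // 1) give up outer
  ObjectSynchronizer::wait(inner, 0, THREAD);                          // 2)-3) wait on inner
  ObjectSynchronizer::reenter(outer, recursions, THREAD);              // 4) restore outer
}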
636 // -----------------------------------------------------------------------------
637 // JNI locks on java objects
638 // NOTE: must use heavy weight monitor to handle jni monitor enter
639 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
640 // the current locking is from JNI instead of Java code
641 if (UseBiasedLocking) {
642 BiasedLocking::revoke(obj, THREAD);
643 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
644 }
645 THREAD->set_current_pending_monitor_is_from_java(false);
646 inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
647 THREAD->set_current_pending_monitor_is_from_java(true);
648 }
649
650 // NOTE: must use heavy weight monitor to handle jni monitor exit
651 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
652 if (UseBiasedLocking) {
653 Handle h_obj(THREAD, obj);
654 BiasedLocking::revoke(h_obj, THREAD);
655 obj = h_obj();
656 }
657 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
658
659 ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
660 // If this thread has locked the object, exit the monitor. We
661 // intentionally do not use CHECK here because we must exit the
662 // monitor even if an exception is pending.
663 if (monitor->check_owner(THREAD)) {
664 monitor->exit(true, THREAD);
665 }
666 }
667
668 // -----------------------------------------------------------------------------
669 // Internal VM locks on java objects
670 // standard constructor, allows locking failures
671 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
672 _dolock = do_lock;
673 _thread = thread;
674 _thread->check_for_valid_safepoint_state();
675 _obj = obj;
676
677 if (_dolock) {
678 ObjectSynchronizer::enter(_obj, &_lock, _thread);
679 }
680 }
681
682 ObjectLocker::~ObjectLocker() {
683 if (_dolock) {
684 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
685 }
686 }
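
// Illustrative RAII usage of ObjectLocker (hypothetical VM-internal
// caller; 'h_obj' is a Handle the caller already manages):
static void sketch_with_object_locked(Handle h_obj, Thread* thread) {
  ObjectLocker ol(h_obj, thread, true);  // ctor calls ObjectSynchronizer::enter()
  // ... code that needs h_obj's monitor held goes here ...
}  // ~ObjectLocker calls ObjectSynchronizer::exit()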
687
688
689 // -----------------------------------------------------------------------------
690 // Wait/Notify/NotifyAll
691 // NOTE: must use heavy weight monitor to handle wait()
692 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
693 if (UseBiasedLocking) {
694 BiasedLocking::revoke(obj, THREAD);
695 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
696 }
697 if (millis < 0) {
698 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
699 }
700 ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
701
702 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
703 monitor->wait(millis, true, THREAD);
704
705 // This dummy call is in place to get around dtrace bug 6254741. Once
706 // that's fixed we can uncomment the following line, remove the call
707 // and change this function back into a "void" func.
708 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
709 return dtrace_waited_probe(monitor, obj, THREAD);
710 }
711
712 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
713 if (UseBiasedLocking) {
714 BiasedLocking::revoke(obj, THREAD);
715 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
716 }
717 if (millis < 0) {
718 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
719 }
720 inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
721 }
722
723 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
724 if (UseBiasedLocking) {
725 BiasedLocking::revoke(obj, THREAD);
726 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
727 }
728
729 markWord mark = obj->mark();
730 if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
731 return;
732 }
733 inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
734 }
735
736 // NOTE: see the comment for notify()
737 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
738 if (UseBiasedLocking) {
739 BiasedLocking::revoke(obj, THREAD);
740 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
741 }
742
743 markWord mark = obj->mark();
744 if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
745 return;
746 }
747 inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
748 }
749
750 // -----------------------------------------------------------------------------
751 // Hash Code handling
752 //
753 // Performance concern:
754 // OrderAccess::storestore() calls release() which at one time stored 0
755 // into the global volatile OrderAccess::dummy variable. This store was
756 // unnecessary for correctness. Many threads storing into a common location
757 // causes considerable cache migration or "sloshing" on large SMP systems.
758 // As such, I avoided using OrderAccess::storestore(). In some cases
759 // OrderAccess::fence() -- which incurs local latency on the executing
760 // processor -- is a better choice as it scales on SMP systems.
761 //
762 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
763 // a discussion of coherency costs. Note that all our current reference
764 // platforms provide strong ST-ST order, so the issue is moot on IA32,
765 // x64, and SPARC.
766 //
767 // As a general policy we use "volatile" to control compiler-based reordering
920 Handle hobj(self, obj);
921 // Relaxing assertion for bug 6320749.
922 assert(Universe::verify_in_progress() ||
923 !SafepointSynchronize::is_at_safepoint(),
924 "biases should not be seen by VM thread here");
925 BiasedLocking::revoke(hobj, JavaThread::current());
926 obj = hobj();
927 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
928 }
929 }
930
931 // hashCode() is a heap mutator ...
932 // Relaxing assertion for bug 6320749.
933 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
934 !SafepointSynchronize::is_at_safepoint(), "invariant");
935 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
936 self->is_Java_thread() , "invariant");
937 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
938 ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
939
940 ObjectMonitor* monitor = NULL;
941 markWord temp, test;
942 intptr_t hash;
943 markWord mark = read_stable_mark(obj);
944
945 // object should remain ineligible for biased locking
946 assert(!mark.has_bias_pattern(), "invariant");
947
948 if (mark.is_neutral()) { // if this is a normal header
949 hash = mark.hash();
950 if (hash != 0) { // if it has a hash, just return it
951 return hash;
952 }
953 hash = get_next_hash(self, obj); // get a new hash
954 temp = mark.copy_set_hash(hash); // merge the hash into header
955 // try to install the hash
956 test = obj->cas_set_mark(temp, mark);
957 if (test == mark) { // if the hash was installed, return it
958 return hash;
959 }
960 // Failed to install the hash. It could be that another thread
961 // installed the hash just before our attempt or inflation has
962 // occurred or... so we fall thru to inflate the monitor for
963 // stability and then install the hash.
964 } else if (mark.has_monitor()) {
965 monitor = mark.monitor();
966 temp = monitor->header();
967 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
968 hash = temp.hash();
969 if (hash != 0) { // if it has a hash, just return it
970 return hash;
971 }
972 // Fall thru so we only have one place that installs the hash in
973 // the ObjectMonitor.
974 } else if (self->is_lock_owned((address)mark.locker())) {
975 // This is a stack lock owned by the calling thread so fetch the
976 // displaced markWord from the BasicLock on the stack.
977 temp = mark.displaced_mark_helper();
978 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
979 hash = temp.hash();
980 if (hash != 0) { // if it has a hash, just return it
981 return hash;
982 }
983 // WARNING:
984 // The displaced header in the BasicLock on a thread's stack
985 // is strictly immutable. It CANNOT be changed in ANY case.
986 // So we have to inflate the stack lock into an ObjectMonitor
987 // even if the current thread owns the lock. The BasicLock on
988 // a thread's stack can be asynchronously read by other threads
989 // during an inflate() call so any change to that stack memory
990 // may not propagate to other threads correctly.
991 }
992
993 // Inflate the monitor to set the hash.
994 monitor = inflate(self, obj, inflate_cause_hash_code);
995 // Load ObjectMonitor's header/dmw field and see if it has a hash.
996 mark = monitor->header();
997 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
998 hash = mark.hash();
999 if (hash == 0) { // if it does not have a hash
1000 hash = get_next_hash(self, obj); // get a new hash
1001 temp = mark.copy_set_hash(hash); // merge the hash into header
1002 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1003 uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1004 test = markWord(v);
1005 if (test != mark) {
1006 // The attempt to update the ObjectMonitor's header/dmw field
1007 // did not work. This can happen if another thread managed to
1008 // merge in the hash just before our cmpxchg().
1009 // If we add any new usages of the header/dmw field, this code
1010 // will need to be updated.
1011 hash = test.hash();
1012 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1013 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1014 }
1015 }
1016 // We finally get the hash.
1017 return hash;
1018 }
1019
1020 // Deprecated -- use FastHashCode() instead.
1021
1022 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1023 return FastHashCode(Thread::current(), obj());
1024 }
1025
1026
1027 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1028 Handle h_obj) {
1029 if (UseBiasedLocking) {
1030 BiasedLocking::revoke(h_obj, thread);
1031 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1032 }
1033
1034 assert(thread == JavaThread::current(), "Can only be called on current thread");
1035 oop obj = h_obj();
1036
1037 markWord mark = read_stable_mark(obj);
1038
1039 // Uncontended case, header points to stack
1040 if (mark.has_locker()) {
1041 return thread->is_lock_owned((address)mark.locker());
1042 }
1043 // Contended case, header points to ObjectMonitor (tagged pointer)
1044 if (mark.has_monitor()) {
1045 ObjectMonitor* monitor = mark.monitor();
1046 return monitor->is_entered(thread) != 0;
1047 }
1048 // Unlocked case, header in place
1049 assert(mark.is_neutral(), "sanity check");
1050 return false;
1051 }
1052
1053 // Be aware that this method can revoke the bias of the lock object.
1054 // This method queries the ownership of the lock handle specified by 'h_obj'.
1055 // If the current thread owns the lock, it returns owner_self. If no
1056 // thread owns the lock, it returns owner_none. Otherwise, it will return
1057 // owner_other.
1058 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1059 (JavaThread *self, Handle h_obj) {
1060 // The caller must beware this method can revoke bias, and
1061 // revocation can result in a safepoint.
1062 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
1063 assert(self->thread_state() != _thread_blocked, "invariant");
1064
1065 // Possible mark states: neutral, biased, stack-locked, inflated
1066
1067 if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
1068 // CASE: biased
1069 BiasedLocking::revoke(h_obj, self);
1070 assert(!h_obj->mark().has_bias_pattern(),
1071 "biases should be revoked by now");
1072 }
1073
1074 assert(self == JavaThread::current(), "Can only be called on current thread");
1075 oop obj = h_obj();
1076 markWord mark = read_stable_mark(obj);
1077
1078 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
1079 if (mark.has_locker()) {
1080 return self->is_lock_owned((address)mark.locker()) ?
1081 owner_self : owner_other;
1082 }
1083
1084 // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
1085 // The Object:ObjectMonitor relationship is stable as long as we're
1086 // not at a safepoint.
1087 if (mark.has_monitor()) {
1088 void* owner = mark.monitor()->_owner;
1089 if (owner == NULL) return owner_none;
1090 return (owner == self ||
1091 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1092 }
1093
1094 // CASE: neutral
1095 assert(mark.is_neutral(), "sanity check");
1096 return owner_none; // it's unlocked
1097 }
1098
1099 // FIXME: jvmti should call this
1100 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1101 if (UseBiasedLocking) {
1102 if (SafepointSynchronize::is_at_safepoint()) {
1103 BiasedLocking::revoke_at_safepoint(h_obj);
1104 } else {
1105 BiasedLocking::revoke(h_obj, JavaThread::current());
1106 }
1107 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1108 }
1109
1110 oop obj = h_obj();
1111 address owner = NULL;
1112
1113 markWord mark = read_stable_mark(obj);
1114
1115 // Uncontended case, header points to stack
1116 if (mark.has_locker()) {
1117 owner = (address) mark.locker();
1118 }
1119
1120 // Contended case, header points to ObjectMonitor (tagged pointer)
1121 else if (mark.has_monitor()) {
1122 ObjectMonitor* monitor = mark.monitor();
1123 assert(monitor != NULL, "monitor should be non-null");
1124 owner = (address) monitor->owner();
1125 }
1126
1127 if (owner != NULL) {
1128 // owning_thread_from_monitor_owner() may also return NULL here
1129 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1130 }
1131
1132 // Unlocked case, header in place
1133 // Cannot have assertion since this object may have been
1134 // locked by another thread when reaching here.
1135 // assert(mark.is_neutral(), "sanity check");
1136
1137 return NULL;
1138 }
1139
1140 // Visitors ...
1141
1142 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1143 PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1144 while (block != NULL) {
1145 assert(block->object() == CHAINMARKER, "must be a block header");
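    // Element [0] is the block's header (its object field holds CHAINMARKER
    // for block list linkage, see om_alloc()), so the loop skips index 0.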
1146 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1147 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1148 oop object = (oop)mid->object();
1149 if (object != NULL) {
1150 // Only process with closure if the object is set.
1151 closure->do_monitor(mid);
1152 }
1153 }
1154 // unmarked_next() is not needed with g_block_list (no locking
1155 // used with block linkage _next_om fields).
1156 block = (PaddedObjectMonitor*)block->next_om();
1157 }
1158 }
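
// Illustrative MonitorClosure usage with monitors_iterate() above; the
// closure name is hypothetical, assuming the usual do_monitor() interface:
class SketchCountingClosure : public MonitorClosure {
 public:
  int _count;
  SketchCountingClosure() : _count(0) {}
  void do_monitor(ObjectMonitor* mid) { _count++; }
};
// Usage sketch: SketchCountingClosure cl; ObjectSynchronizer::monitors_iterate(&cl);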
1159
1160 static bool monitors_used_above_threshold() {
1161 int population = Atomic::load(&om_list_globals._population);
1162 if (population == 0) {
1163 return false;
1164 }
1165 if (MonitorUsedDeflationThreshold > 0) {
1166 int monitors_used = population - Atomic::load(&om_list_globals._free_count);
1167 int monitor_usage = (monitors_used * 100LL) / population;
1168 return monitor_usage > MonitorUsedDeflationThreshold;
1169 }
1170 return false;
1171 }
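
// Worked example, assuming the default MonitorUsedDeflationThreshold of 90:
// population=1024 and _free_count=64 give monitors_used=960 and
// monitor_usage=(960*100)/1024=93; since 93 > 90, a cleanup is requested.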
1172
1173 // Returns true if MonitorBound is set (> 0) and if the specified
1174 // cnt is > MonitorBound. Otherwise returns false.
1175 static bool is_MonitorBound_exceeded(const int cnt) {
1176 const int mx = MonitorBound;
1177 return mx > 0 && cnt > mx;
1178 }
1179
1180 bool ObjectSynchronizer::is_cleanup_needed() {
1181 if (monitors_used_above_threshold()) {
1182 // Too many monitors in use.
1183 return true;
1184 }
1185 return needs_monitor_scavenge();
1186 }
1187
1188 bool ObjectSynchronizer::needs_monitor_scavenge() {
1189 if (Atomic::load(&_forceMonitorScavenge) == 1) {
1190 log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1191 return true;
1192 }
1193 return false;
1194 }
1195
1196 void ObjectSynchronizer::oops_do(OopClosure* f) {
1197 // We only scan the global used list here (for moribund threads), and
1198 // the thread-local monitors in Thread::oops_do().
1199 global_used_oops_do(f);
1200 }
1201
1202 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1203 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1204 list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
1205 }
1206
1207 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1208 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1209 list_oops_do(thread->om_in_use_list, f);
1210 }
1211
1212 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1213 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1214 // The oops_do() phase does not overlap with monitor deflation
1215 // so no need to lock ObjectMonitors for the list traversal.
1216 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1217 if (mid->object() != NULL) {
1218 f->do_oop((oop*)mid->object_addr());
1219 }
1220 }
1221 }
1222
1223
1224 // -----------------------------------------------------------------------------
1225 // ObjectMonitor Lifecycle
1226 // -----------------------
1227 // Inflation unlinks monitors from om_list_globals._free_list or a per-thread
1228 // free list and associates them with objects. Deflation -- which occurs at
1229 // STW-time -- disassociates idle monitors from objects.
1230 // Such scavenged monitors are returned to the om_list_globals._free_list.
1231 //
1232 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1233 //
1234 // Lifecycle:
1235 // -- unassigned and on the om_list_globals._free_list
1236 // -- unassigned and on a per-thread free list
1237 // -- assigned to an object. The object is inflated and the mark refers
1238 // to the ObjectMonitor.
1239
1240
1241 // Constraining monitor pool growth via MonitorBound ...
1242 //
1243 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1244 //
1245 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but
1246 // the rate of scavenging is driven primarily by GC. As such, we can find
1247 // an inordinate number of monitors in circulation.
1248 // To avoid that scenario we can artificially induce a STW safepoint
1249 // if the pool appears to be growing past some reasonable bound.
1250 // Generally we favor time in space-time tradeoffs, but as there's no
1251 // natural back-pressure on the # of extant monitors we need to impose some
1252 // type of limit. Beware that if MonitorBound is set to too low a value
1253 // we could just loop. In addition, if MonitorBound is set to a low value
1254 // we'll incur more safepoints, which are harmful to performance.
1255 // See also: GuaranteedSafepointInterval
1256 //
1257 // If MonitorBound is set, the boundary applies to
1258 // (om_list_globals._population - om_list_globals._free_count)
1259 // i.e., if there are not enough ObjectMonitors on the global free list,
1260 // then a safepoint deflation is induced. Picking a good MonitorBound value
1261 // is non-trivial.
1262
1263 static void InduceScavenge(Thread* self, const char * Whence) {
1264 // Induce STW safepoint to trim monitors
1265 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1266 // More precisely, trigger a cleanup safepoint as the number
1267 // of active monitors passes the specified threshold.
1268 // TODO: assert thread state is reasonable
1269
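  // The xchg below makes the request one-shot: only the thread that flips
  // _forceMonitorScavenge from 0 to 1 notifies the VMThread; the flag is
  // cleared again elsewhere once the cleanup has run.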
1270 if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
1271 VMThread::check_for_forced_cleanup();
1272 }
1273 }
1274
1275 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
1276 // A large MAXPRIVATE value reduces both list lock contention
1277 // and list coherency traffic, but also tends to increase the
1278 // number of ObjectMonitors in circulation as well as the STW
1279 // scavenge costs. As usual, we lean toward time in space-time
1280 // tradeoffs.
1281 const int MAXPRIVATE = 1024;
1282 NoSafepointVerifier nsv;
1283
1284 stringStream ss;
1285 for (;;) {
1286 ObjectMonitor* m;
1287
1288 // 1: try to allocate from the thread's local om_free_list.
1289 // Threads will attempt to allocate first from their local list, then
1290 // from the global list, and only after those attempts fail will the
1291 // thread attempt to instantiate new monitors. Thread-local free lists
1292 // improve allocation latency, as well as reducing coherency traffic
1293 // on the shared global list.
1294 m = take_from_start_of_om_free_list(self);
1295 if (m != NULL) {
1296 guarantee(m->object() == NULL, "invariant");
1297 prepend_to_om_in_use_list(self, m);
1298 return m;
1299 }
1300
1301 // 2: try to allocate from the global om_list_globals._free_list
1302 // If we're using thread-local free lists then try
1303 // to reprovision the caller's free list.
1304 if (Atomic::load(&om_list_globals._free_list) != NULL) {
1305 // Reprovision the thread's om_free_list.
1306 // Use bulk transfers to reduce the allocation rate and heat
1307 // on various locks.
1308 for (int i = self->om_free_provision; --i >= 0;) {
1309 ObjectMonitor* take = take_from_start_of_global_free_list();
1310 if (take == NULL) {
1311 break; // No more are available.
1312 }
1313 guarantee(take->object() == NULL, "invariant");
1314 take->Recycle();
1315 om_release(self, take, false);
1316 }
1317 self->om_free_provision += 1 + (self->om_free_provision / 2);
1318 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1319
1320 if (is_MonitorBound_exceeded(Atomic::load(&om_list_globals._population) -
1321 Atomic::load(&om_list_globals._free_count))) {
1322 // Not enough ObjectMonitors on the global free list.
1323 // We can't safely induce a STW safepoint from om_alloc() as our thread
1324 // state may not be appropriate for such activities and callers may hold
1325 // naked oops, so instead we defer the action.
1326 InduceScavenge(self, "om_alloc");
1327 }
1328 continue;
1329 }
1330
1331 // 3: allocate a block of new ObjectMonitors
1332 // Both the local and global free lists are empty -- resort to malloc().
1333 // In the current implementation ObjectMonitors are TSM - immortal.
1334 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1335 // each ObjectMonitor to start at the beginning of a cache line,
1336 // so we use align_up().
1337 // A better solution would be to use C++ placement-new.
1338 // BEWARE: As it stands currently, we don't run the ctors!
1339 assert(_BLOCKSIZE > 1, "invariant");
1340 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1341 PaddedObjectMonitor* temp;
1342 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1343 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1344 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1345 (void)memset((void *) temp, 0, neededsize);
1346
1347 // Format the block.
1348 // Initialize the linked list: each monitor points to its next,
1349 // forming the singly linked free list; the very first monitor
1350 // will point to the next block, which forms the block list.
1351 // The trick of using the 1st element in the block as g_block_list
1352 // linkage should be reconsidered. A better implementation would
1353 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1354
1355 for (int i = 1; i < _BLOCKSIZE; i++) {
1356 temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
1357 }
1358
1359 // terminate the last monitor as the end of the list
1360 temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1361
1362 // Element [0] is reserved for global list linkage
1363 temp[0].set_object(CHAINMARKER);
1364
1365 // Consider carving out this thread's current request from the
1366 // block in hand. This avoids some lock traffic and redundant
1367 // list activity.
1368
1369 prepend_block_to_lists(temp);
1370 }
1371 }
1372
1373 // Place "m" on the caller's private per-thread om_free_list.
1374 // In practice there's no need to clamp or limit the number of
1375 // monitors on a thread's om_free_list as the only non-allocation time
1376 // we'll call om_release() is to return a monitor to the free list after
1377 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1378 // accumulate on a thread's free list.
1379 //
1380 // Key constraint: all ObjectMonitors on a thread's free list and the global
1381 // free list must have their object field set to null. This prevents the
1382 // scavenger -- deflate_monitor_list() -- from reclaiming them while we
1383 // are trying to release them.
1384
1385 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1386 bool from_per_thread_alloc) {
1387 guarantee(m->header().value() == 0, "invariant");
1388 guarantee(m->object() == NULL, "invariant");
1389 NoSafepointVerifier nsv;
1390
1391 stringStream ss;
1392 guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1393 "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
1394 m->_recursions);
1395 // _next_om is used for both per-thread in-use and free lists so
1396 // we have to remove 'm' from the in-use list first (as needed).
1397 if (from_per_thread_alloc) {
1398 // Need to remove 'm' from om_in_use_list.
1399 ObjectMonitor* mid = NULL;
1400 ObjectMonitor* next = NULL;
1401
1402 // This list walk can only race with another list walker since
1403 // deflation can only happen at a safepoint so we don't have to
1404 // worry about an ObjectMonitor being removed from this list
1405 // while we are walking it.
1406
1407 // Lock the list head to avoid racing with another list walker.
1408 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1409 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1410 }
1411 next = unmarked_next(mid);
1412 if (m == mid) {
1413 // First special case:
1414 // 'm' matches mid, is the list head and is locked. Switch the list
1415 // head to next which unlocks the list head, but leaves the extracted
1416 // mid locked:
1417 Atomic::store(&self->om_in_use_list, next);
1418 } else if (m == next) {
1419 // Second special case:
1420 // 'm' matches next after the list head and we already have the list
1421 // head locked so set mid to what we are extracting:
1422 mid = next;
1423 // Lock mid to prevent races with a list walker:
1424 om_lock(mid);
1425 // Update next to what follows mid (if anything):
1426 next = unmarked_next(mid);
1427 // Switch next after the list head to new next which unlocks the
1428 // list head, but leaves the extracted mid locked:
1429 self->om_in_use_list->set_next_om(next);
1430 } else {
1431 // We have to search the list to find 'm'.
1432 om_unlock(mid); // unlock the list head
1433 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1434 " is too short.", p2i(self), p2i(self->om_in_use_list));
1435 // Our starting anchor is next after the list head which is the
1436 // last ObjectMonitor we checked:
1437 ObjectMonitor* anchor = next;
1438 while ((mid = unmarked_next(anchor)) != NULL) {
1439 if (m == mid) {
1440 // We found 'm' on the per-thread in-use list so extract it.
1441 om_lock(anchor); // Lock the anchor so we can safely modify it.
1442 // Update next to what follows mid (if anything):
1443 next = unmarked_next(mid);
1444 // Switch next after the anchor to new next which unlocks the
1445 // anchor, but leaves the extracted mid locked:
1446 anchor->set_next_om(next);
1447 break;
1448 } else {
1449 anchor = mid;
1450 }
1451 }
1452 }
1453
1454 if (mid == NULL) {
1455 // Reached end of the list and didn't find 'm' so:
1456     fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1457 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1458 }
1459
1460 // At this point mid is disconnected from the in-use list so
1461 // its lock no longer has any effects on the in-use list.
1462 Atomic::dec(&self->om_in_use_count);
1463 // Unlock mid, but leave the next value for any lagging list
1464 // walkers. It will get cleaned up when mid is prepended to
1465 // the thread's free list:
1466 om_unlock(mid);
1467 }
1468
1469 prepend_to_om_free_list(self, m);
1470 }
1471
1472 // Return ObjectMonitors on a moribund thread's free and in-use
1473 // lists to the appropriate global lists. The ObjectMonitors on the
1474 // per-thread in-use list may still be in use by other threads.
1475 //
1476 // We currently call om_flush() from Threads::remove() before the
1477 // thread has been excised from the thread list and is no longer a
1478 // mutator. This means that om_flush() cannot run concurrently with
1479 // a safepoint and interleave with deflate_idle_monitors(). In
1480 // particular, this ensures that the thread's in-use monitors are
1481 // scanned by a GC safepoint, either via Thread::oops_do() (before
1482 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1483 // om_flush() is called).
1484
1485 void ObjectSynchronizer::om_flush(Thread* self) {
1486 // Process the per-thread in-use list first to be consistent.
1487 int in_use_count = 0;
1488 ObjectMonitor* in_use_list = NULL;
1489 ObjectMonitor* in_use_tail = NULL;
1490 NoSafepointVerifier nsv;
1491
1492 // This function can race with a list walker thread so we lock the
1493 // list head to prevent confusion.
1494 if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1495 // At this point, we have locked the in-use list head so a racing
1496 // thread cannot come in after us. However, a racing thread could
1497 // be ahead of us; we'll detect that and delay to let it finish.
1498 //
1499 // The thread is going away, however the ObjectMonitors on the
1500 // om_in_use_list may still be in-use by other threads. Link
1501 // them to in_use_tail, which will be linked into the global
1502 // in-use list (om_list_globals._in_use_list) below.
1503 //
1504 // Account for the in-use list head before the loop since it is
1505 // already locked (by this thread):
1506 in_use_tail = in_use_list;
1507 in_use_count++;
1508 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL; cur_om = unmarked_next(cur_om)) {
1509 if (is_locked(cur_om)) {
1510 // cur_om is locked so there must be a racing walker thread ahead
1511 // of us so we'll give it a chance to finish.
1512 while (is_locked(cur_om)) {
1513 os::naked_short_sleep(1);
1514 }
1515 }
1516 in_use_tail = cur_om;
1517 in_use_count++;
1518 }
1519 guarantee(in_use_tail != NULL, "invariant");
1520 int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
1521 assert(l_om_in_use_count == in_use_count, "in-use counts don't match: "
1522 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
1523 Atomic::store(&self->om_in_use_count, 0);
1524 // Clear the in-use list head (which also unlocks it):
1525 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1526 om_unlock(in_use_list);
1527 }
1528
1529 int free_count = 0;
1530 ObjectMonitor* free_list = NULL;
1531 ObjectMonitor* free_tail = NULL;
1532 // This function can race with a list walker thread so we lock the
1533 // list head to prevent confusion.
1534 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
1535 // At this point, we have locked the free list head so a racing
1536 // thread cannot come in after us. However, a racing thread could
1537 // be ahead of us; we'll detect that and delay to let it finish.
1538 //
1539 // The thread is going away. Set 'free_tail' to the last per-thread free
1540 // monitor which will be linked to om_list_globals._free_list below.
1541 //
1542 // Account for the free list head before the loop since it is
1543 // already locked (by this thread):
1544 free_tail = free_list;
1545 free_count++;
1546 for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
1547 if (is_locked(s)) {
1548 // s is locked so there must be a racing walker thread ahead
1549 // of us so we'll give it a chance to finish.
1550 while (is_locked(s)) {
1551 os::naked_short_sleep(1);
1552 }
1553 }
1554 free_tail = s;
1555 free_count++;
1556 guarantee(s->object() == NULL, "invariant");
1557 stringStream ss;
1558 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1559 }
1560 guarantee(free_tail != NULL, "invariant");
1561 int l_om_free_count = Atomic::load(&self->om_free_count);
1562 assert(l_om_free_count == free_count, "free counts don't match: "
1563 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
1564 Atomic::store(&self->om_free_count, 0);
1565 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1566 om_unlock(free_list);
1567 }
1568
1569 if (free_tail != NULL) {
1570 prepend_list_to_global_free_list(free_list, free_tail, free_count);
1571 }
1572
1573 if (in_use_tail != NULL) {
1574 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1575 }
1576
1577 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1578 LogStreamHandle(Info, monitorinflation) lsh_info;
1579 LogStream* ls = NULL;
1580 if (log_is_enabled(Debug, monitorinflation)) {
1581 ls = &lsh_debug;
1582 } else if ((free_count != 0 || in_use_count != 0) &&
1585 }
1586 if (ls != NULL) {
1587 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1588 ", in_use_count=%d" ", om_free_provision=%d",
1589 p2i(self), free_count, in_use_count, self->om_free_provision);
1590 }
1591 }
1592
1593 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1594 const oop obj,
1595 ObjectSynchronizer::InflateCause cause) {
1596 assert(event != NULL, "invariant");
1597 assert(event->should_commit(), "invariant");
1598 event->set_monitorClass(obj->klass());
1599 event->set_address((uintptr_t)(void*)obj);
1600 event->set_cause((u1)cause);
1601 event->commit();
1602 }
1603
1604 // Fast path code shared by multiple functions
1605 void ObjectSynchronizer::inflate_helper(oop obj) {
1606 markWord mark = obj->mark();
1607 if (mark.has_monitor()) {
1608 assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
1609 assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
1610 return;
1611 }
1612 inflate(Thread::current(), obj, inflate_cause_vm_internal);
1613 }
1614
1615 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
1616 oop object, const InflateCause cause) {
1617 // Inflate mutates the heap ...
1618 // Relaxing assertion for bug 6320749.
1619 assert(Universe::verify_in_progress() ||
1620 !SafepointSynchronize::is_at_safepoint(), "invariant");
1621
1622 EventJavaMonitorInflate event;
1623
1624 for (;;) {
1625 const markWord mark = object->mark();
1626 assert(!mark.has_bias_pattern(), "invariant");
1627
1628 // The mark can be in one of the following states:
1629 // * Inflated - just return
1630 // * Stack-locked - coerce it to inflated
1631 // * INFLATING - busy wait for conversion to complete
1632 // * Neutral - aggressively inflate the object.
1633 // * BIASED - Illegal. We should never see this
1634
1635 // CASE: inflated
1636 if (mark.has_monitor()) {
1637 ObjectMonitor* inf = mark.monitor();
1638 markWord dmw = inf->header();
1639 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1640 assert(inf->object() == object, "invariant");
1641 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1642 return inf;
1643 }
1644
1645 // CASE: inflation in progress - inflating over a stack-lock.
1646 // Some other thread is converting from stack-locked to inflated.
1647 // Only that thread can complete inflation -- other threads must wait.
1648 // The INFLATING value is transient.
1649 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1650 // We could always eliminate polling by parking the thread on some auxiliary list.
1651 if (mark == markWord::INFLATING()) {
1652 read_stable_mark(object);
1653 continue;
1654 }
1655
1656 // CASE: stack-locked
1657 // Could be stack-locked either by this thread or by some other thread.
1658 //
1659 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1660 // to install INFLATING into the mark word. We originally installed INFLATING,
1661 // allocated the objectmonitor, and then finally STed the address of the
1662 // objectmonitor into the mark. This was correct, but artificially lengthened
1668 // critical INFLATING...ST interval. A thread can transfer
1669 // multiple objectmonitors en-mass from the global free list to its local free list.
1670 // This reduces coherency traffic and lock contention on the global free list.
1671 // Using such local free lists, it doesn't matter if the om_alloc() call appears
1672 // before or after the CAS(INFLATING) operation.
1673 // See the comments in om_alloc().
1674
1675 LogStreamHandle(Trace, monitorinflation) lsh;
1676
1677 if (mark.has_locker()) {
1678 ObjectMonitor* m = om_alloc(self);
1679 // Optimistically prepare the objectmonitor - anticipate successful CAS
1680 // We do this before the CAS in order to minimize the length of time
1681 // in which INFLATING appears in the mark.
1682 m->Recycle();
1683 m->_Responsible = NULL;
1684 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1685
1686 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1687 if (cmp != mark) {
1688 om_release(self, m, true);
1689 continue; // Interference -- just retry
1690 }
1691
1692 // We've successfully installed INFLATING (0) into the mark-word.
1693 // This is the only case where 0 will appear in a mark-word.
1694 // Only the singular thread that successfully swings the mark-word
1695 // to 0 can perform (or more precisely, complete) inflation.
1696 //
1697 // Why do we CAS a 0 into the mark-word instead of just CASing the
1698 // mark-word from the stack-locked value directly to the new inflated state?
1699 // Consider what happens when a thread unlocks a stack-locked object.
1700 // It attempts to use CAS to swing the displaced header value from the
1701 // on-stack BasicLock back into the object header. Recall also that the
1702 // header value (hash code, etc) can reside in (a) the object header, or
1703 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1704 // header in an ObjectMonitor. The inflate() routine must copy the header
1705 // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1706 // the while preserving the hashCode stability invariants. If the owner
1707 // decides to release the lock while the value is 0, the unlock will fail
1708 // and control will eventually pass from slow_exit() to inflate. The owner
1709 // will then spin, waiting for the 0 value to disappear. Put another way,
1710 // the 0 causes the owner to stall if the owner happens to try to
1711 // drop the lock (restoring the header from the BasicLock to the object)
1712 // while inflation is in-progress. This protocol avoids races that
1713 // would otherwise permit hashCode values to change or "flicker" for an object.
1714 // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
1715 // 0 serves as a "BUSY" inflate-in-progress indicator.
1716
1717
1718 // fetch the displaced mark from the owner's stack.
1719 // The owner can't die or unwind past the lock while our INFLATING
1720 // object is in the mark. Furthermore the owner can't complete
1721 // an unlock on the object, either.
1722 markWord dmw = mark.displaced_mark_helper();
1723 // Catch if the object's header is not neutral (not locked and
1724 // not marked is what we care about here).
1725 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1726
1727 // Setup monitor fields to proper values -- prepare the monitor
1728 m->set_header(dmw);
1729
1730 // Optimization: if the mark.locker stack address is associated
1731 // with this thread we could simply set m->_owner = self.
1732 // Note that a thread can inflate an object
1733 // that it has stack-locked -- as might happen in wait() -- directly
1734 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1735 m->set_owner_from(NULL, mark.locker());
1736 m->set_object(object);
1737 // TODO-FIXME: assert BasicLock->dhw != 0.
1738
1739 // Must preserve store ordering. The monitor state must
1740 // be stable at the time of publishing the monitor address.
1741 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1742 object->release_set_mark(markWord::encode(m));
1743
1744 // Hopefully the performance counters are allocated on distinct cache lines
1745 // to avoid false sharing on MP systems ...
1746 OM_PERFDATA_OP(Inflations, inc());
1747 if (log_is_enabled(Trace, monitorinflation)) {
1748 ResourceMark rm(self);
1749 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1750 INTPTR_FORMAT ", type='%s'", p2i(object),
1751 object->mark().value(), object->klass()->external_name());
1752 }
1753 if (event.should_commit()) {
1754 post_monitor_inflate_event(&event, object, cause);
1755 }
1756 return m;
1757 }
1758
1759 // CASE: neutral
1760 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1761 // If we know we're inflating for entry it's better to inflate by swinging a
1762 // pre-locked ObjectMonitor pointer into the object header. A successful
1763 // CAS inflates the object *and* confers ownership to the inflating thread.
1764 // In the current implementation we use a 2-step mechanism where we CAS()
1765 // to inflate and then CAS() again to try to swing _owner from NULL to self.
1766 // An inflateTry() method that we could call from enter() would be useful.
1767
1768 // Catch the case where the object's header is not neutral (not locked
1769 // and not marked is what we care about here).
1770 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1771 ObjectMonitor* m = om_alloc(self);
1772 // prepare m for installation - set monitor to initial state
1773 m->Recycle();
1774 m->set_header(mark);
1775 m->set_object(object);
1776 m->_Responsible = NULL;
1777 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
1778
1779 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
1780 m->set_header(markWord::zero());
1781 m->set_object(NULL);
1782 m->Recycle();
1783 om_release(self, m, true);
1784 m = NULL;
1785 // Interference - the markword changed - just retry.
1786 // The state-transitions are one-way, so there's no chance of
1787 // live-lock -- "Inflated" is an absorbing state.
1788 continue;
1789 }
1790
1791 // Hopefully the performance counters are allocated on distinct
1792 // cache lines to avoid false sharing on MP systems ...
1793 OM_PERFDATA_OP(Inflations, inc());
1794 if (log_is_enabled(Trace, monitorinflation)) {
1795 ResourceMark rm(self);
1796 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
1797 INTPTR_FORMAT ", type='%s'", p2i(object),
1798 object->mark().value(), object->klass()->external_name());
1799 }
1800 if (event.should_commit()) {
1801 post_monitor_inflate_event(&event, object, cause);
1802 }
1803 return m;
1804 }
1805 }
1806
1807
1808 // We maintain a list of in-use monitors for each thread.
1809 //
1810 // deflate_thread_local_monitors() scans a single thread's in-use list, while
1811 // deflate_idle_monitors() scans only a global list of in-use monitors which
1812 // is populated only as a thread dies (see om_flush()).
1813 //
1814 // These operations are called at all safepoints, immediately after mutators
1815 // are stopped, but before any objects have moved. Collectively they traverse
1816 // the population of in-use monitors, deflating where possible. The scavenged
1817 // monitors are returned to the global monitor free list.
1818 //
1819 // Beware that we scavenge at *every* stop-the-world point. Having a large
1820 // number of monitors in-use could negatively impact performance. We also want
1821 // to minimize the total # of monitors in circulation, as they incur a small
1822 // footprint penalty.
1823 //
1824 // Perversely, the heap size -- and thus the STW safepoint rate --
1825 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
1826 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
1827 // This is an unfortunate aspect of this design.
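//
// For orientation, SafepointSynchronize::do_cleanup_tasks() drives the
// functions below in roughly this order (a simplified sketch -- the real
// code may split the per-thread pass across parallel worker threads):
//
//   DeflateMonitorCounters counters;
//   ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
//   // for each JavaThread jt:
//   ObjectSynchronizer::deflate_thread_local_monitors(jt, &counters);
//   ObjectSynchronizer::deflate_idle_monitors(&counters);  // global list
//   ObjectSynchronizer::finish_deflate_idle_monitors(&counters);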
1828
1829 // Deflate a single monitor if not in-use
1830 // Return true if deflated, false if in-use
1831 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1832 ObjectMonitor** free_head_p,
1833 ObjectMonitor** free_tail_p) {
1834 bool deflated;
1835 // Normal case ... The monitor is associated with obj.
1836 const markWord mark = obj->mark();
1837 guarantee(mark == markWord::encode(mid), "should match: mark="
1838 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
1839 markWord::encode(mid).value());
1840 // Make sure that mark.monitor() and markWord::encode() agree:
1841 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
1842 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
1843 const markWord dmw = mid->header();
1844 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1845
1846 if (mid->is_busy()) {
1847 // Easy checks are first - the ObjectMonitor is busy so no deflation.
1848 deflated = false;
1849 } else {
1850 // Deflate the monitor if it is no longer being used
1851 // It's idle - scavenge and return to the global free list
1852 // plain old deflation ...
1853 if (log_is_enabled(Trace, monitorinflation)) {
1854 ResourceMark rm;
1855 log_trace(monitorinflation)("deflate_monitor: "
1856 "object=" INTPTR_FORMAT ", mark="
1857 INTPTR_FORMAT ", type='%s'", p2i(obj),
1858 mark.value(), obj->klass()->external_name());
1859 }
1860
1861 // Restore the header back to obj
1862 obj->release_set_mark(dmw);
1863 mid->clear();
1864
1865 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
1866 p2i(mid->object()));
1867
1868 // Move the deflated ObjectMonitor to the working free list
1869 // defined by free_head_p and free_tail_p.
1870 if (*free_head_p == NULL) *free_head_p = mid;
1871 if (*free_tail_p != NULL) {
1872 // We append to the list so the caller can use mid->_next_om
1873 // to fix the linkages in its context.
1874 ObjectMonitor* prevtail = *free_tail_p;
1875 // Should have been cleaned up by the caller:
1876 // Note: Should not have to lock prevtail here since we're at a
1877 // safepoint and ObjectMonitors on the local free list should
1878 // not be accessed in parallel.
1879 #ifdef ASSERT
1880 ObjectMonitor* l_next_om = prevtail->next_om();
1881 #endif
1882 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
1883 prevtail->set_next_om(mid);
1884 }
1885 *free_tail_p = mid;
1886 // At this point, mid->_next_om still refers to its current
1887 // value and another ObjectMonitor's _next_om field still
1888 // refers to this ObjectMonitor. Those linkages have to be
1889 // cleaned up by the caller who has the complete context.
1890 deflated = true;
1891 }
1892 return deflated;
1893 }
1894
1895 // Walk a given monitor list, and deflate idle monitors.
1896 // The given list could be a per-thread list or a global list.
1897 //
1898 // In the case of parallel processing of thread local monitor lists,
1899 // work is done by Threads::parallel_threads_do() which ensures that
1900 // each Java thread is processed by exactly one worker thread, and
1901 // thus avoids conflicts that would arise if worker threads were to
1902 // process the same monitor lists concurrently.
1903 //
1904 // See also ParallelSPCleanupTask and
1905 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1906 // Threads::parallel_java_threads_do() in thread.cpp.
1907 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
1908 int* count_p,
1909 ObjectMonitor** free_head_p,
1910 ObjectMonitor** free_tail_p) {
1911 ObjectMonitor* cur_mid_in_use = NULL;
1912 ObjectMonitor* mid = NULL;
1913 ObjectMonitor* next = NULL;
1914 int deflated_count = 0;
1925 // by unlinking mid from the global or per-thread in-use list.
1926 if (cur_mid_in_use == NULL) {
1927 // mid is the list head so switch the list head to next:
1928 Atomic::store(list_p, next);
1929 } else {
1930 // Switch cur_mid_in_use's next field to next:
1931 cur_mid_in_use->set_next_om(next);
1932 }
1933 // At this point mid is disconnected from the in-use list.
1934 deflated_count++;
1935 Atomic::dec(count_p);
1936 // mid is current tail in the free_head_p list so NULL terminate it:
1937 mid->set_next_om(NULL);
1938 } else {
1939 cur_mid_in_use = mid;
1940 }
1941 }
1942 return deflated_count;
1943 }
1944
1945 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1946 counters->n_in_use = 0; // currently associated with objects
1947 counters->n_in_circulation = 0; // extant
1948 counters->n_scavenged = 0; // reclaimed (global and per-thread)
1949 counters->per_thread_scavenged = 0; // per-thread scavenge total
1950 counters->per_thread_times = 0.0; // per-thread scavenge times
1951 }
1952
1953 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
1954 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1955 bool deflated = false;
1956
1957 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
1958 ObjectMonitor* free_tail_p = NULL;
1959 elapsedTimer timer;
1960
1961 if (log_is_enabled(Info, monitorinflation)) {
1962 timer.start();
1963 }
1964
1965 // Note: the thread-local monitors lists get deflated in
1966 // a separate pass. See deflate_thread_local_monitors().
1967
1968 // For moribund threads, scan om_list_globals._in_use_list
1969 int deflated_count = 0;
1970 if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
1971 // Update n_in_circulation before om_list_globals._in_use_count is
1972 // updated by deflation.
1973 Atomic::add(&counters->n_in_circulation,
1974 Atomic::load(&om_list_globals._in_use_count));
1987 #endif
1988 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
1989 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
1990 Atomic::add(&counters->n_scavenged, deflated_count);
1991 }
1992 timer.stop();
1993
1994 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1995 LogStreamHandle(Info, monitorinflation) lsh_info;
1996 LogStream* ls = NULL;
1997 if (log_is_enabled(Debug, monitorinflation)) {
1998 ls = &lsh_debug;
1999 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2000 ls = &lsh_info;
2001 }
2002 if (ls != NULL) {
2003 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2004 }
2005 }
2006
2007 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2008 // Report the cumulative time for deflating each thread's idle
2009 // monitors. Note: if the work is split among more than one
2010 // worker thread, then the reported time will likely be more
2011 // than a beginning-to-end measurement of the phase.
2012 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2013
2014 if (log_is_enabled(Debug, monitorinflation)) {
2015 // exit_globals()'s call to audit_and_print_stats() is done
2016 // at the Info level and not at a safepoint.
2017 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2018 } else if (log_is_enabled(Info, monitorinflation)) {
2019 log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
2020 "global_free_count=%d",
2021 Atomic::load(&om_list_globals._population),
2022 Atomic::load(&om_list_globals._in_use_count),
2023 Atomic::load(&om_list_globals._free_count));
2024 }
2025
2026 Atomic::store(&_forceMonitorScavenge, 0); // Reset
2027
2028 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2029 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2030
2031 GVars.stw_random = os::random();
2032 GVars.stw_cycle++;
2033 }
2034
2035 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2036 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2037
2038 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2039 ObjectMonitor* free_tail_p = NULL;
2040 elapsedTimer timer;
2041
2042 if (log_is_enabled(Info, safepoint, cleanup) ||
2043 log_is_enabled(Info, monitorinflation)) {
2044 timer.start();
2045 }
2046
2047 // Update n_in_circulation before om_in_use_count is updated by deflation.
2048 Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));
2049
2050 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2051 Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));
2052
2053 if (free_head_p != NULL) {
2054 // Move the deflated ObjectMonitors back to the global free list.
2055 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2056 #ifdef ASSERT
2057 ObjectMonitor* l_next_om = free_tail_p->next_om();
2189 if (Atomic::load(&om_list_globals._population) == chk_om_population) {
2190 ls->print_cr("global_population=%d equals chk_om_population=%d",
2191 Atomic::load(&om_list_globals._population), chk_om_population);
2192 } else {
2193 // With fine grained locks on the monitor lists, it is possible for
2194 // log_monitor_list_counts() to return a value that doesn't match
2195 // om_list_globals._population. So far a higher value has been
2196 // seen in testing, so something is being double counted by
2197 // log_monitor_list_counts().
2198 ls->print_cr("WARNING: global_population=%d is not equal to "
2199 "chk_om_population=%d",
2200 Atomic::load(&om_list_globals._population), chk_om_population);
2201 }
2202
2203 // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
2204 chk_global_in_use_list_and_count(ls, &error_cnt);
2205
2206 // Check om_list_globals._free_list and om_list_globals._free_count:
2207 chk_global_free_list_and_count(ls, &error_cnt);
2208
2209 ls->print_cr("Checking per-thread lists:");
2210
2211 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2212 // Check om_in_use_list and om_in_use_count:
2213 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2214
2215 // Check om_free_list and om_free_count:
2216 chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
2217 }
2218
2219 if (error_cnt == 0) {
2220 ls->print_cr("No errors found in monitor list checks.");
2221 } else {
2222 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
2223 }
2224
2225 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
2226 (!on_exit && log_is_enabled(Trace, monitorinflation))) {
2227 // When exiting, this log output is at the Info level. When called
2228 // at a safepoint, this log output is at the Trace level since
2239 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
2240 outputStream * out, int *error_cnt_p) {
2241 stringStream ss;
2242 if (n->is_busy()) {
2243 if (jt != NULL) {
2244 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2245 ": free per-thread monitor must not be busy: %s", p2i(jt),
2246 p2i(n), n->is_busy_to_string(&ss));
2247 } else {
2248 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2249 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
2250 }
2251 *error_cnt_p = *error_cnt_p + 1;
2252 }
2253 if (n->header().value() != 0) {
2254 if (jt != NULL) {
2255 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2256 ": free per-thread monitor must have NULL _header "
2257 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
2258 n->header().value());
2259 } else {
2260 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2261 "must have NULL _header field: _header=" INTPTR_FORMAT,
2262 p2i(n), n->header().value());
2263 }
2264 *error_cnt_p = *error_cnt_p + 1;
2265 }
2266 if (n->object() != NULL) {
2267 if (jt != NULL) {
2268 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2269 ": free per-thread monitor must have NULL _object "
2270 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
2271 p2i(n->object()));
2272 } else {
2273 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
2274 "must have NULL _object field: _object=" INTPTR_FORMAT,
2275 p2i(n), p2i(n->object()));
2276 }
2277 *error_cnt_p = *error_cnt_p + 1;
2278 }
2279 }
2280
2281 // Lock the next ObjectMonitor for traversal and unlock the current
2282 // ObjectMonitor. Returns the next ObjectMonitor if there is one.
2283 // Otherwise returns NULL (after unlocking the current ObjectMonitor).
2284 // This function is used by the various list walker functions to
2285 // safely walk a list without allowing an ObjectMonitor to be moved
2311 if (cur == NULL) {
2312 break;
2313 }
2314 }
2315 }
2316 int l_free_count = Atomic::load(&om_list_globals._free_count);
2317 if (l_free_count == chk_om_free_count) {
2318 out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
2319 l_free_count, chk_om_free_count);
2320 } else {
2321 // With fine grained locks on om_list_globals._free_list, it
2322 // is possible for an ObjectMonitor to be prepended to
2323 // om_list_globals._free_list after we started calculating
2324 // chk_om_free_count so om_list_globals._free_count may not
2325 // match anymore.
2326 out->print_cr("WARNING: global_free_count=%d is not equal to "
2327 "chk_om_free_count=%d", l_free_count, chk_om_free_count);
2328 }
2329 }
2330
2331 // Check the global in-use list and count; log the results of the checks.
2332 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
2333 int *error_cnt_p) {
2334 int chk_om_in_use_count = 0;
2335 ObjectMonitor* cur = NULL;
2336 if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
2337 // Marked the global in-use list head so process the list.
2338 while (true) {
2339 chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
2340 chk_om_in_use_count++;
2341
2342 cur = lock_next_for_traversal(cur);
2343 if (cur == NULL) {
2344 break;
2345 }
2346 }
2347 }
2348 int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
2349 if (l_in_use_count == chk_om_in_use_count) {
2350 out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
2469 if (l_om_in_use_count == chk_om_in_use_count) {
2470 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
2471 "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
2472 chk_om_in_use_count);
2473 } else {
2474 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
2475 "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
2476 chk_om_in_use_count);
2477 *error_cnt_p = *error_cnt_p + 1;
2478 }
2479 }
2480
2481 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
2482 // flags indicate why the entry is in-use, 'object' and 'object type'
2483 // indicate the associated object and its type.
2484 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
2485 stringStream ss;
2486 if (Atomic::load(&om_list_globals._in_use_count) > 0) {
2487 out->print_cr("In-use global monitor info:");
2488 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2489 out->print_cr("%18s %s %18s %18s",
2490 "monitor", "BHL", "object", "object type");
2491 out->print_cr("================== === ================== ==================");
2492 ObjectMonitor* cur = NULL;
2493 if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
2494 // Marked the global in-use list head so process the list.
2495 while (true) {
2496 const oop obj = (oop) cur->object();
2497 const markWord mark = cur->header();
2498 ResourceMark rm;
2499 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(cur),
2500 cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
2501 p2i(obj), obj->klass()->external_name());
2502 if (cur->is_busy() != 0) {
2503 out->print(" (%s)", cur->is_busy_to_string(&ss));
2504 ss.reset();
2505 }
2506 out->cr();
2507
2508 cur = lock_next_for_traversal(cur);
2509 if (cur == NULL) {
2510 break;
2511 }
2512 }
2513 }
2514 }
2515
2516 out->print_cr("In-use per-thread monitor info:");
2517 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2518 out->print_cr("%18s %18s %s %18s %18s",
2519 "jt", "monitor", "BHL", "object", "object type");
2520 out->print_cr("================== ================== === ================== ==================");
2521 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2522 ObjectMonitor* cur = NULL;
2523 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
2524 // Marked the per-thread in-use list head so process the list.
2525 while (true) {
2526 const oop obj = (oop) cur->object();
2527 const markWord mark = cur->header();
2528 ResourceMark rm;
2529 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
2530 " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
2531 mark.hash() != 0, cur->owner() != NULL, p2i(obj),
2532 obj->klass()->external_name());
2533 if (cur->is_busy() != 0) {
2534 out->print(" (%s)", cur->is_busy_to_string(&ss));
2535 ss.reset();
2536 }
2537 out->cr();
2538
2539 cur = lock_next_for_traversal(cur);
2540 if (cur == NULL) {
2541 break;
2542 }
2543 }
2544 }
2545 }
2546
2547 out->flush();
2548 }
2549
2550 // Log counts for the global and per-thread monitor lists and return
2551 // the population count.
2552 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
2553 int pop_count = 0;
2554 out->print_cr("%18s %10s %10s %10s",
2555 "Global Lists:", "InUse", "Free", "Total");
2556 out->print_cr("================== ========== ========== ==========");
2557 int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
2558 int l_free_count = Atomic::load(&om_list_globals._free_count);
2559 out->print_cr("%18s %10d %10d %10d", "", l_in_use_count,
2560 l_free_count, Atomic::load(&om_list_globals._population));
2561 pop_count += l_in_use_count + l_free_count;
2562
2563 out->print_cr("%18s %10s %10s %10s",
2564 "Per-Thread Lists:", "InUse", "Free", "Provision");
2565 out->print_cr("================== ========== ========== ==========");
2566
2567 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2568 int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
2569 int l_om_free_count = Atomic::load(&jt->om_free_count);
2570 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
2571 l_om_in_use_count, l_om_free_count, jt->om_free_provision);
2572 pop_count += l_om_in_use_count + l_om_free_count;
2573 }
2574 return pop_count;
2575 }
2576
2577 #ifndef PRODUCT
2578
2579 // Check if monitor belongs to the monitor cache
2580 // The list is grow-only so it's *relatively* safe to traverse
2581 // the list of extant blocks without taking a lock.
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "logging/log.hpp"
28 #include "logging/logStream.hpp"
29 #include "jfr/jfrEvents.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/metaspaceShared.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/handshake.hpp"
41 #include "runtime/interfaceSupport.inline.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/objectMonitor.hpp"
44 #include "runtime/objectMonitor.inline.hpp"
45 #include "runtime/osThread.hpp"
46 #include "runtime/safepointMechanism.inline.hpp"
47 #include "runtime/safepointVerifiers.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "runtime/stubRoutines.hpp"
50 #include "runtime/synchronizer.hpp"
51 #include "runtime/thread.inline.hpp"
52 #include "runtime/timer.hpp"
53 #include "runtime/vframe.hpp"
54 #include "runtime/vmThread.hpp"
55 #include "utilities/align.hpp"
56 #include "utilities/dtrace.hpp"
57 #include "utilities/events.hpp"
58 #include "utilities/preserveException.hpp"
59
60 // The "core" versions of monitor enter and exit reside in this file.
61 // The interpreter and compilers contain specialized transliterated
62 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
63 // for instance. If you make changes here, make sure to modify the
64 // interpreter, and both C1 and C2 fast-path inline locking code emission.
65 //
66 // -----------------------------------------------------------------------------
103 }
104
105 #else // ndef DTRACE_ENABLED
106
107 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
108 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
109
110 #endif // ndef DTRACE_ENABLED
111
112 // This exists only as a workaround of dtrace bug 6254741
113 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
114 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
115 return 0;
116 }
117
118 #define NINFLATIONLOCKS 256
119 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
120
121 // global list of blocks of monitors
122 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
123 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
124 bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
125 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
126
127 struct ObjectMonitorListGlobals {
128 char _pad_prefix[OM_CACHE_LINE_SIZE];
129 // These are highly shared list related variables.
130 // To avoid false-sharing they need to be the sole occupants of a cache line.
131
132 // Global ObjectMonitor free list. Newly allocated and deflated
133 // ObjectMonitors are prepended here.
134 ObjectMonitor* _free_list;
135 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
136
137 // Global ObjectMonitor in-use list. When a JavaThread is exiting,
138 // ObjectMonitors on its per-thread in-use list are prepended here.
139 ObjectMonitor* _in_use_list;
140 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
141
142 // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
143 // is true, deflated ObjectMonitors wait on this list until after a
144 // handshake or a safepoint for platforms that don't support handshakes.
145 // After the handshake or safepoint, the deflated ObjectMonitors are
146 // prepended to free_list.
147 ObjectMonitor* _wait_list;
148 DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
149
150 int _free_count; // # on free_list
151 DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
152
153 int _in_use_count; // # on in_use_list
154 DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
155
156 int _population; // # Extant -- in circulation
157 DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));
158
159 int _wait_count; // # on wait_list
160 DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
161 };
162 static ObjectMonitorListGlobals om_list_globals;
163
164 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
165
166
167 // =====================> Spin-lock functions
168
169 // ObjectMonitors are not lockable outside of this file. We use spin-locks
170 // implemented using a bit in the _next_om field instead of the heavier
171 // weight locking mechanisms for faster list management.
172
173 #define OM_LOCK_BIT 0x1
174
175 // Return true if the ObjectMonitor is locked.
176 // Otherwise returns false.
177 static bool is_locked(ObjectMonitor* om) {
178 return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
179 }
180
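//
// The companion helpers (om_lock(), om_unlock(), unmarked_next(),
// get_list_head_locked()) are elided here. As a minimal sketch of the
// idea -- assuming a cmpxchg-style try_set_next_om() on ObjectMonitor;
// the real definitions may differ in detail:
//
//   static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
//     return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
//   }
//
//   static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
//     return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
//   }
//
//   static void om_lock(ObjectMonitor* om) {
//     while (true) {  // spin until we install OM_LOCK_BIT in _next_om
//       ObjectMonitor* next = unmarked_next(om);
//       if (om->try_set_next_om(next, mark_om_ptr(next)) == next) {
//         return;
//       }
//     }
//   }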
298 Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
299 break;
300 }
301 // Implied else: try it all again
302 }
303
304 // Second we handle om_list_globals._free_list:
305 prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
306 &om_list_globals._free_list, &om_list_globals._free_count);
307 }
308
309 // Prepend a list of ObjectMonitors to om_list_globals._free_list.
310 // 'tail' is the last ObjectMonitor in the list and there are 'count'
311 // on the list. Also updates om_list_globals._free_count.
312 static void prepend_list_to_global_free_list(ObjectMonitor* list,
313 ObjectMonitor* tail, int count) {
314 prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
315 &om_list_globals._free_count);
316 }
317
318 // Prepend a list of ObjectMonitors to om_list_globals._wait_list.
319 // 'tail' is the last ObjectMonitor in the list and there are 'count'
320 // on the list. Also updates om_list_globals._wait_count.
321 static void prepend_list_to_global_wait_list(ObjectMonitor* list,
322 ObjectMonitor* tail, int count) {
323 assert(HandshakeAfterDeflateIdleMonitors, "sanity check");
324 prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
325 &om_list_globals._wait_count);
326 }
327
328 // Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
329 // 'tail' is the last ObjectMonitor in the list and there are 'count'
330 // on the list. Also updates om_list_globals._in_use_count.
331 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
332 ObjectMonitor* tail, int count) {
333 prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
334 &om_list_globals._in_use_count);
335 }
336
337 // Prepend an ObjectMonitor to the specified list. Also updates
338 // the specified counter.
339 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
340 int* count_p) {
341 while (true) {
342 om_lock(m); // Lock m so we can safely update its next field.
343 ObjectMonitor* cur = NULL;
344 // Lock the list head to guard against races with a list walker
345 // or async deflater thread (which only races in om_in_use_list):
346 if ((cur = get_list_head_locked(list_p)) != NULL) {
347 // List head is now locked so we can safely switch it.
348 m->set_next_om(cur); // m now points to cur (and unlocks m)
349 Atomic::store(list_p, m); // Switch list head to unlocked m.
350 om_unlock(cur);
351 break;
352 }
353 // The list is empty so try to set the list head.
354 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
355 m->set_next_om(cur); // m now points to NULL (and unlocks m)
356 if (Atomic::cmpxchg(list_p, cur, m) == cur) {
357 // List head is now unlocked m.
358 break;
359 }
360 // Implied else: try it all again
361 }
362 Atomic::inc(count_p);
363 }
364
365 // Prepend an ObjectMonitor to a per-thread om_free_list.
366 // Also updates the per-thread om_free_count.
367 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
368 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
369 }
370
371 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
372 // Also updates the per-thread om_in_use_count.
373 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
374 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
375 }
376
377 // Take an ObjectMonitor from the start of the specified list. Also
378 // decrements the specified counter. Returns NULL if none are available.
379 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
380 int* count_p) {
381 ObjectMonitor* take = NULL;
382 // Lock the list head to guard against races with a list walker
383 // or async deflater thread (which only races in om_list_globals._free_list):
384 if ((take = get_list_head_locked(list_p)) == NULL) {
385 return NULL; // None are available.
386 }
387 ObjectMonitor* next = unmarked_next(take);
388 // Switch locked list head to next (which unlocks the list head, but
389 // leaves take locked):
390 Atomic::store(list_p, next);
391 Atomic::dec(count_p);
392 // Unlock take, but leave the next value for any lagging list
393 // walkers. It will get cleaned up when take is prepended to
394 // the in-use list:
395 om_unlock(take);
396 return take;
397 }
398
399 // Take an ObjectMonitor from the start of the om_list_globals._free_list.
400 // Also updates om_list_globals._free_count. Returns NULL if none are
401 // available.
402 static ObjectMonitor* take_from_start_of_global_free_list() {
403 return take_from_start_of_common(&om_list_globals._free_list,
472 }
473
474 // biased locking and any other IMS exception states take the slow-path
475 return false;
476 }
477
478
479 // The LockNode emitted directly at the synchronization site would have
480 // been too big if it were to have included support for the cases of inflated
481 // recursive enter and exit, so they go here instead.
482 // Note that we can't safely call AsyncPrintJavaStack() from within
483 // quick_enter() as our thread state remains _in_Java.
484
485 bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
486 BasicLock * lock) {
487 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
488 assert(self->is_Java_thread(), "invariant");
489 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
490 NoSafepointVerifier nsv;
491 if (obj == NULL) return false; // Need to throw NPE
492
493 while (true) {
494 const markWord mark = obj->mark();
495
496 if (mark.has_monitor()) {
497 ObjectMonitorHandle omh;
498 if (!omh.save_om_ptr(obj, mark)) {
499 // Lost a race with async deflation so try again.
500 assert(AsyncDeflateIdleMonitors, "sanity check");
501 continue;
502 }
503 ObjectMonitor* const m = omh.om_ptr();
504 assert(m->object() == obj, "invariant");
505 Thread* const owner = (Thread *) m->_owner;
506
507 // Lock contention and Transactional Lock Elision (TLE) diagnostics
508 // and observability
509 // Case: light contention possibly amenable to TLE
510 // Case: TLE inimical operations such as nested/recursive synchronization
511
512 if (owner == self) {
513 m->_recursions++;
514 return true;
515 }
516
517 // This Java Monitor is inflated so obj's header will never be
518 // displaced to this thread's BasicLock. Make the displaced header
519 // non-NULL so this BasicLock is not seen as recursive nor as
520 // being locked. We do this unconditionally so that this thread's
521 // BasicLock cannot be mis-interpreted by any stack walkers. For
522 // performance reasons, stack walkers generally first check for
523 // Biased Locking in the object's header, the second check is for
524 // stack-locking in the object's header, the third check is for
525 // recursive stack-locking in the displaced header in the BasicLock,
526 // and last are the inflated Java Monitor (ObjectMonitor) checks.
527 lock->set_displaced_header(markWord::unused_mark());
528
529 if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
530 assert(m->_recursions == 0, "invariant");
531 return true;
532 }
533
534 if (AsyncDeflateIdleMonitors &&
535 m->try_set_owner_from(DEFLATER_MARKER, self) == DEFLATER_MARKER) {
536 // The deflation protocol finished the first part (setting owner),
537 // but it failed the second part (making ref_count negative) and
538 // bailed. Or the ObjectMonitor was async deflated and reused.
539 // Acquired the monitor.
540 assert(m->_recursions == 0, "invariant");
541 return true;
542 }
543 }
544 break;
545 }
546
547 // Note that we could inflate in quick_enter.
548 // This is likely a useful optimization.
549 // Critically, in quick_enter() we must not:
550 // -- perform bias revocation, or
551 // -- block indefinitely, or
552 // -- reach a safepoint
553
554 return false; // revert to slow-path
555 }
556
557 // -----------------------------------------------------------------------------
558 // Monitor Enter/Exit
559 // The interpreter and compiler assembly code tries to lock using the fast path
560 // of this algorithm. Make sure to update that code if the following function is
561 // changed. The implementation is extremely sensitive to race conditions. Be careful.
562
563 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
564 if (UseBiasedLocking) {
576 // Anticipate successful CAS -- the ST of the displaced mark must
577 // be visible <= the ST performed by the CAS.
578 lock->set_displaced_header(mark);
579 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
580 return;
581 }
582 // Fall through to inflate() ...
583 } else if (mark.has_locker() &&
584 THREAD->is_lock_owned((address)mark.locker())) {
585 assert(lock != mark.locker(), "must not re-lock the same lock");
586 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
587 lock->set_displaced_header(markWord::from_pointer(NULL));
588 return;
589 }
590
591 // The object header will never be displaced to this lock,
592 // so it does not matter what the value is, except that it
593 // must be non-zero to avoid looking like a re-entrant lock,
594 // and must not look locked either.
595 lock->set_displaced_header(markWord::unused_mark());
596 ObjectMonitorHandle omh;
597 inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
598 omh.om_ptr()->enter(THREAD);
599 }
600
601 void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
602 markWord mark = object->mark();
603 // We cannot check for Biased Locking if we are racing an inflation.
604 assert(mark == markWord::INFLATING() ||
605 !mark.has_bias_pattern(), "should not see bias pattern here");
606
607 markWord dhw = lock->displaced_header();
608 if (dhw.value() == 0) {
609 // If the displaced header is NULL, then this exit matches up with
610 // a recursive enter. No real work to do here except for diagnostics.
611 #ifndef PRODUCT
612 if (mark != markWord::INFLATING()) {
613 // Only do diagnostics if we are not racing an inflation. Simply
614 // exiting a recursive enter of a Java Monitor that is being
615 // inflated is safe; see the has_monitor() comment below.
616 assert(!mark.is_neutral(), "invariant");
617 assert(!mark.has_locker() ||
618 THREAD->is_lock_owned((address)mark.locker()), "invariant");
627 // does not own the Java Monitor.
628 ObjectMonitor* m = mark.monitor();
629 assert(((oop)(m->object()))->mark() == mark, "invariant");
630 assert(m->is_entered(THREAD), "invariant");
631 }
632 }
633 #endif
634 return;
635 }
636
637 if (mark == markWord::from_pointer(lock)) {
638 // If the object is stack-locked by the current thread, try to
639 // swing the displaced header from the BasicLock back to the mark.
640 assert(dhw.is_neutral(), "invariant");
641 if (object->cas_set_mark(dhw, mark) == mark) {
642 return;
643 }
644 }
645
646 // We have to take the slow-path of possible inflation and then exit.
647 ObjectMonitorHandle omh;
648 inflate(&omh, THREAD, object, inflate_cause_vm_internal);
649 omh.om_ptr()->exit(true, THREAD);
650 }
651
652 // -----------------------------------------------------------------------------
653 // Class Loader support to workaround deadlocks on the class loader lock objects
654 // Also used by GC
655 // complete_exit()/reenter() are used to wait on a nested lock
656 // i.e. to give up an outer lock completely and then re-enter
657 // Used when holding nested locks - lock acquisition order: lock1 then lock2
658 // 1) complete_exit lock1 - saving recursion count
659 // 2) wait on lock2
660 // 3) when notified on lock2, unlock lock2
661 // 4) reenter lock1 with original recursion count
662 // 5) lock lock2
663 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
664 intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
665 if (UseBiasedLocking) {
666 BiasedLocking::revoke(obj, THREAD);
667 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
668 }
669
670 ObjectMonitorHandle omh;
671 inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
672 intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
673 return ret_code;
674 }
675
676 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
677 void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
678 if (UseBiasedLocking) {
679 BiasedLocking::revoke(obj, THREAD);
680 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
681 }
682
683 ObjectMonitorHandle omh;
684 inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
685 omh.om_ptr()->reenter(recursions, THREAD);
686 }
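// A usage sketch of the protocol above (hypothetical 'lock1'/'lock2'
// Handles, running on a JavaThread with a TRAPS context):
//
//   intx rec = ObjectSynchronizer::complete_exit(lock1, THREAD); // 1) drop lock1, save count
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                  // 2) wait on lock2
//   // 3) once notified, unlock lock2 (normal exit path)
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);             // 4) restore lock1
//   // 5) re-lock lock2 via the normal ObjectSynchronizer::enter() path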
687 // -----------------------------------------------------------------------------
688 // JNI locks on java objects
689 // NOTE: must use heavy weight monitor to handle jni monitor enter
690 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
691 // the current locking is from JNI instead of Java code
692 if (UseBiasedLocking) {
693 BiasedLocking::revoke(obj, THREAD);
694 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
695 }
696 THREAD->set_current_pending_monitor_is_from_java(false);
697 ObjectMonitorHandle omh;
698 inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
699 omh.om_ptr()->enter(THREAD);
700 THREAD->set_current_pending_monitor_is_from_java(true);
701 }
702
703 // NOTE: must use heavy weight monitor to handle jni monitor exit
704 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
705 if (UseBiasedLocking) {
706 Handle h_obj(THREAD, obj);
707 BiasedLocking::revoke(h_obj, THREAD);
708 obj = h_obj();
709 }
710 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
711
712 ObjectMonitorHandle omh;
713 inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
714 ObjectMonitor* monitor = omh.om_ptr();
715 // If this thread has locked the object, exit the monitor. We
716 // intentionally do not use CHECK here because we must exit the
717 // monitor even if an exception is pending.
718 if (monitor->check_owner(THREAD)) {
719 monitor->exit(true, THREAD);
720 }
721 }
722
723 // -----------------------------------------------------------------------------
724 // Internal VM locks on java objects
725 // standard constructor, allows locking failures
726 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
727 _dolock = do_lock;
728 _thread = thread;
729 _thread->check_for_valid_safepoint_state();
730 _obj = obj;
731
732 if (_dolock) {
733 ObjectSynchronizer::enter(_obj, &_lock, _thread);
734 }
735 }
736
737 ObjectLocker::~ObjectLocker() {
738 if (_dolock) {
739 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
740 }
741 }
742
743
744 // -----------------------------------------------------------------------------
745 // Wait/Notify/NotifyAll
746 // NOTE: must use heavy weight monitor to handle wait()
747 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
748 if (UseBiasedLocking) {
749 BiasedLocking::revoke(obj, THREAD);
750 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
751 }
752 if (millis < 0) {
753 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
754 }
755 ObjectMonitorHandle omh;
756 inflate(&omh, THREAD, obj(), inflate_cause_wait);
757 ObjectMonitor* monitor = omh.om_ptr();
758
759 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
760 monitor->wait(millis, true, THREAD);
761
762 // This dummy call is in place to get around dtrace bug 6254741. Once
763 // that's fixed we can uncomment the following line, remove the call
764 // and change this function back into a "void" func.
765 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
766 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
767 return ret_code;
768 }
769
770 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
771 if (UseBiasedLocking) {
772 BiasedLocking::revoke(obj, THREAD);
773 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
774 }
775 if (millis < 0) {
776 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
777 }
778 ObjectMonitorHandle omh;
779 inflate(&omh, THREAD, obj(), inflate_cause_wait);
780 omh.om_ptr()->wait(millis, false, THREAD);
781 }
782
783 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
784 if (UseBiasedLocking) {
785 BiasedLocking::revoke(obj, THREAD);
786 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
787 }
788
789 markWord mark = obj->mark();
790 if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
791 return;
792 }
793 ObjectMonitorHandle omh;
794 inflate(&omh, THREAD, obj(), inflate_cause_notify);
795 omh.om_ptr()->notify(THREAD);
796 }
797
798 // NOTE: see comment of notify()
799 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
800 if (UseBiasedLocking) {
801 BiasedLocking::revoke(obj, THREAD);
802 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
803 }
804
805 markWord mark = obj->mark();
806 if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
807 return;
808 }
809 ObjectMonitorHandle omh;
810 inflate(&omh, THREAD, obj(), inflate_cause_notify);
811 omh.om_ptr()->notifyAll(THREAD);
812 }
813
814 // -----------------------------------------------------------------------------
815 // Hash Code handling
816 //
817 // Performance concern:
818 // OrderAccess::storestore() calls release() which at one time stored 0
819 // into the global volatile OrderAccess::dummy variable. This store was
820 // unnecessary for correctness. Many threads storing into a common location
821 // causes considerable cache migration or "sloshing" on large SMP systems.
822 // As such, I avoided using OrderAccess::storestore(). In some cases
823 // OrderAccess::fence() -- which incurs local latency on the executing
824 // processor -- is a better choice as it scales on SMP systems.
825 //
826 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
827 // a discussion of coherency costs. Note that all our current reference
828 // platforms provide strong ST-ST order, so the issue is moot on IA32,
829 // x64, and SPARC.
830 //
831 // As a general policy we use "volatile" to control compiler-based reordering
984 Handle hobj(self, obj);
985 // Relaxing assertion for bug 6320749.
986 assert(Universe::verify_in_progress() ||
987 !SafepointSynchronize::is_at_safepoint(),
988 "biases should not be seen by VM thread here");
989 BiasedLocking::revoke(hobj, JavaThread::current());
990 obj = hobj();
991 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
992 }
993 }
994
995 // hashCode() is a heap mutator ...
996 // Relaxing assertion for bug 6320749.
997 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
998 !SafepointSynchronize::is_at_safepoint(), "invariant");
999 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
1000 self->is_Java_thread() , "invariant");
1001 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
1002 ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
1003
1004 while (true) {
1005 ObjectMonitor* monitor = NULL;
1006 markWord temp, test;
1007 intptr_t hash;
1008 markWord mark = read_stable_mark(obj);
1009
1010 // object should remain ineligible for biased locking
1011 assert(!mark.has_bias_pattern(), "invariant");
1012
1013 if (mark.is_neutral()) { // if this is a normal header
1014 hash = mark.hash();
1015 if (hash != 0) { // if it has a hash, just return it
1016 return hash;
1017 }
1018 hash = get_next_hash(self, obj); // get a new hash
1019 temp = mark.copy_set_hash(hash); // merge the hash into header
1020 // try to install the hash
1021 test = obj->cas_set_mark(temp, mark);
1022 if (test == mark) { // if the hash was installed, return it
1023 return hash;
1024 }
1025 // Failed to install the hash. It could be that another thread
1026 // installed the hash just before our attempt or inflation has
1027 // occurred or... so we fall thru to inflate the monitor for
1028 // stability and then install the hash.
1029 } else if (mark.has_monitor()) {
1030 ObjectMonitorHandle omh;
1031 if (!omh.save_om_ptr(obj, mark)) {
1032 // Lost a race with async deflation so try again.
1033 assert(AsyncDeflateIdleMonitors, "sanity check");
1034 continue;
1035 }
1036 monitor = omh.om_ptr();
1037 temp = monitor->header();
1038 // Allow for a lagging install_displaced_markword_in_object() to
1039 // have marked the ObjectMonitor's header/dmw field.
1040 assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()),
1041 "invariant: header=" INTPTR_FORMAT, temp.value());
1042 hash = temp.hash();
1043 if (hash != 0) { // if it has a hash, just return it
1044 return hash;
1045 }
1046 // Fall thru so we only have one place that installs the hash in
1047 // the ObjectMonitor.
1048 } else if (self->is_lock_owned((address)mark.locker())) {
1049 // This is a stack lock owned by the calling thread so fetch the
1050 // displaced markWord from the BasicLock on the stack.
1051 temp = mark.displaced_mark_helper();
1052 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1053 hash = temp.hash();
1054 if (hash != 0) { // if it has a hash, just return it
1055 return hash;
1056 }
1057 // WARNING:
1058 // The displaced header in the BasicLock on a thread's stack
1059 // is strictly immutable. It CANNOT be changed in ANY case.
1060 // So we have to inflate the stack lock into an ObjectMonitor
1061 // even if the current thread owns the lock. The BasicLock on
1062 // a thread's stack can be asynchronously read by other threads
1063 // during an inflate() call so any change to that stack memory
1064 // may not propagate to other threads correctly.
1065 }
1066
1067 // Inflate the monitor to set the hash.
1068 ObjectMonitorHandle omh;
1069 inflate(&omh, self, obj, inflate_cause_hash_code);
1070 monitor = omh.om_ptr();
1071 // Load ObjectMonitor's header/dmw field and see if it has a hash.
1072 mark = monitor->header();
1073 // Allow for a lagging install_displaced_markword_in_object() to
1074 // have marked the ObjectMonitor's header/dmw field.
1075 assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()),
1076 "invariant: header=" INTPTR_FORMAT, mark.value());
1077 hash = mark.hash();
1078 if (hash == 0) { // if it does not have a hash
1079 hash = get_next_hash(self, obj); // get a new hash
1080 temp = mark.copy_set_hash(hash); // merge the hash into header
1081 if (AsyncDeflateIdleMonitors && temp.is_marked()) {
1082 // A lagging install_displaced_markword_in_object() has marked
1083 // the ObjectMonitor's header/dmw field. We clear it to avoid
1084 // any confusion if we are able to set the hash.
1085 temp.set_unmarked();
1086 }
1087 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1088 uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1089 test = markWord(v);
1090 if (test != mark) {
1091 // The attempt to update the ObjectMonitor's header/dmw field
1092 // did not work. This can happen if another thread managed to
1093 // merge in the hash just before our cmpxchg(). With async
1094 // deflation, a lagging install_displaced_markword_in_object()
1095 // could have just marked or just unmarked the header/dmw field.
1096 // If we add any new usages of the header/dmw field, this code
1097 // will need to be updated.
1098 if (AsyncDeflateIdleMonitors) {
1099 // Since async deflation gives us two possible reasons for
1100 // the cmpxchg() to fail, it is easier to simply retry.
1101 continue;
1102 }
1103 hash = test.hash();
1104 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1105 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1106 }
1107 }
1108 // We finally get the hash.
1109 return hash;
1110 }
1111 }
1112
1113 // Deprecated -- use FastHashCode() instead.
1114
1115 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1116 return FastHashCode(Thread::current(), obj());
1117 }
1118
1119
1120 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1121 Handle h_obj) {
1122 if (UseBiasedLocking) {
1123 BiasedLocking::revoke(h_obj, thread);
1124 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1125 }
1126
1127 assert(thread == JavaThread::current(), "Can only be called on current thread");
1128 oop obj = h_obj();
1129
1130 while (true) {
1131 markWord mark = read_stable_mark(obj);
1132
1133 // Uncontended case, header points to stack
1134 if (mark.has_locker()) {
1135 return thread->is_lock_owned((address)mark.locker());
1136 }
1137 // Contended case, header points to ObjectMonitor (tagged pointer)
1138 if (mark.has_monitor()) {
1139 ObjectMonitorHandle omh;
1140 if (!omh.save_om_ptr(obj, mark)) {
1141 // Lost a race with async deflation so try again.
1142 assert(AsyncDeflateIdleMonitors, "sanity check");
1143 continue;
1144 }
1145 bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
1146 return ret_code;
1147 }
1148 // Unlocked case, header in place
1149 assert(mark.is_neutral(), "sanity check");
1150 return false;
1151 }
1152 }
1153
1154 // Be aware that this method can revoke the bias of the lock object.
1155 // This method queries the ownership of the lock handle specified by 'h_obj'.
1156 // If the current thread owns the lock, it returns owner_self. If no
1157 // thread owns the lock, it returns owner_none. Otherwise, it will return
1158 // owner_other.
1159 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1160 (JavaThread *self, Handle h_obj) {
1161 // The caller must beware this method can revoke bias, and
1162 // revocation can result in a safepoint.
1163 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
1164 assert(self->thread_state() != _thread_blocked, "invariant");
1165
1166 // Possible mark states: neutral, biased, stack-locked, inflated
1167
1168 if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
1169 // CASE: biased
1170 BiasedLocking::revoke(h_obj, self);
1171 assert(!h_obj->mark().has_bias_pattern(),
1172 "biases should be revoked by now");
1173 }
1174
1175 assert(self == JavaThread::current(), "Can only be called on current thread");
1176 oop obj = h_obj();
1177
1178 while (true) {
1179 markWord mark = read_stable_mark(obj);
1180
1181 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
1182 if (mark.has_locker()) {
1183 return self->is_lock_owned((address)mark.locker()) ?
1184 owner_self : owner_other;
1185 }
1186
1187 // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
1188 // The Object:ObjectMonitor relationship is stable as long as we're
1189 // not at a safepoint and AsyncDeflateIdleMonitors is false.
1190 if (mark.has_monitor()) {
1191 ObjectMonitorHandle omh;
1192 if (!omh.save_om_ptr(obj, mark)) {
1193 // Lost a race with async deflation so try again.
1194 assert(AsyncDeflateIdleMonitors, "sanity check");
1195 continue;
1196 }
1197 ObjectMonitor* monitor = omh.om_ptr();
1198 void* owner = monitor->_owner;
1199 if (owner == NULL) return owner_none;
1200 return (owner == self ||
1201 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1202 }
1203
1204 // CASE: neutral
1205 assert(mark.is_neutral(), "sanity check");
1206 return owner_none; // it's unlocked
1207 }
1208 }
1209
1210 // FIXME: jvmti should call this
1211 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1212 if (UseBiasedLocking) {
1213 if (SafepointSynchronize::is_at_safepoint()) {
1214 BiasedLocking::revoke_at_safepoint(h_obj);
1215 } else {
1216 BiasedLocking::revoke(h_obj, JavaThread::current());
1217 }
1218 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1219 }
1220
1221 oop obj = h_obj();
1222
1223 while (true) {
1224 address owner = NULL;
1225 markWord mark = read_stable_mark(obj);
1226
1227 // Uncontended case, header points to stack
1228 if (mark.has_locker()) {
1229 owner = (address) mark.locker();
1230 }
1231
1232 // Contended case, header points to ObjectMonitor (tagged pointer)
1233 else if (mark.has_monitor()) {
1234 ObjectMonitorHandle omh;
1235 if (!omh.save_om_ptr(obj, mark)) {
1236 // Lost a race with async deflation so try again.
1237 assert(AsyncDeflateIdleMonitors, "sanity check");
1238 continue;
1239 }
1240 ObjectMonitor* monitor = omh.om_ptr();
1241 assert(monitor != NULL, "monitor should be non-null");
1242 owner = (address) monitor->owner();
1243 }
1244
1245 if (owner != NULL) {
1246 // owning_thread_from_monitor_owner() may also return NULL here
1247 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1248 }
1249
1250 // Unlocked case, header in place
1251 // Cannot have assertion since this object may have been
1252 // locked by another thread when reaching here.
1253 // assert(mark.is_neutral(), "sanity check");
1254
1255 return NULL;
1256 }
1257 }
1258
1259 // Visitors ...
1260
1261 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1262 PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1263 while (block != NULL) {
1264 assert(block->object() == CHAINMARKER, "must be a block header");
1265 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1266 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1267 ObjectMonitorHandle omh;
1268 if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
1269 // The ObjectMonitor* is not free and it has been made safe.
1270 if (mid->object() == NULL) {
1271 // Only process with closure if the object is set.
1272 continue;
1273 }
1274 closure->do_monitor(mid);
1275 }
1276 }
1277 // unmarked_next() is not needed with g_block_list (no locking
1278 // used with block linkage _next_om fields).
1279 block = (PaddedObjectMonitor*)block->next_om();
1280 }
1281 }
1282
1283 static bool monitors_used_above_threshold() {
1284 int population = Atomic::load(&om_list_globals._population);
1285 if (population == 0) {
1286 return false;
1287 }
1288 if (MonitorUsedDeflationThreshold > 0) {
1289 int monitors_used = population - Atomic::load(&om_list_globals._free_count);
1290 if (HandshakeAfterDeflateIdleMonitors) {
1291 monitors_used -= Atomic::load(&om_list_globals._wait_count);
1292 }
1293 int monitor_usage = (monitors_used * 100LL) / population;
1294 return monitor_usage > MonitorUsedDeflationThreshold;
1295 }
1296 return false;
1297 }
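// Worked example with illustrative numbers: _population == 1000,
// _free_count == 850 and (with HandshakeAfterDeflateIdleMonitors)
// _wait_count == 50 gives monitors_used == 1000 - 850 - 50 == 100, so
// monitor_usage == (100 * 100) / 1000 == 10. With
// MonitorUsedDeflationThreshold at its default of 90, 10 > 90 is false
// and no deflation is requested on this basis.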
1298
1299 // Returns true if MonitorBound is set (> 0) and if the specified
1300 // cnt is > MonitorBound. Otherwise returns false.
1301 static bool is_MonitorBound_exceeded(const int cnt) {
1302 const int mx = MonitorBound;
1303 return mx > 0 && cnt > mx;
1304 }
1305
1306 bool ObjectSynchronizer::is_async_deflation_needed() {
1307 if (!AsyncDeflateIdleMonitors) {
1308 return false;
1309 }
1310 if (is_async_deflation_requested()) {
1311 // Async deflation request.
1312 return true;
1313 }
1314 if (AsyncDeflationInterval > 0 &&
1315 time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1316 monitors_used_above_threshold()) {
1317 // It's been longer than our specified deflate interval and there
1318 // are too many monitors in use. We don't deflate more frequently
1319 // than AsyncDeflationInterval (unless is_async_deflation_requested)
1320 // in order to not swamp the ServiceThread.
1321 _last_async_deflation_time_ns = os::javaTimeNanos();
1322 return true;
1323 }
1324 int monitors_used = Atomic::load(&om_list_globals._population) -
1325 Atomic::load(&om_list_globals._free_count);
1326 if (HandshakeAfterDeflateIdleMonitors) {
1327 monitors_used -= Atomic::load(&om_list_globals._wait_count);
1328 }
1329 if (is_MonitorBound_exceeded(monitors_used)) {
1330 // Not enough ObjectMonitors on the global free list.
1331 return true;
1332 }
1333 return false;
1334 }
1335
1336 bool ObjectSynchronizer::needs_monitor_scavenge() {
1337 if (Atomic::load(&_forceMonitorScavenge) == 1) {
1338 log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1339 return true;
1340 }
1341 return false;
1342 }
1343
1344 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1345 if (!AsyncDeflateIdleMonitors) {
1346 if (monitors_used_above_threshold()) {
1347 // Too many monitors in use.
1348 return true;
1349 }
1350 return needs_monitor_scavenge();
1351 }
1352 if (is_special_deflation_requested()) {
1353 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1354 // if there is a special deflation request.
1355 return true;
1356 }
1357 return false;
1358 }
1359
1360 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
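// Both values come from os::javaTimeNanos(), so the subtraction yields
// elapsed nanoseconds; dividing by (NANOUNITS / MILLIUNITS) converts to ms.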
1361 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1362 }
1363
1364 void ObjectSynchronizer::oops_do(OopClosure* f) {
1365 // We only scan the global used list here (for moribund threads), and
1366 // the thread-local monitors in Thread::oops_do().
1367 global_used_oops_do(f);
1368 }
1369
1370 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1371 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1372 list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
1373 }
1374
1375 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1376 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1377 list_oops_do(thread->om_in_use_list, f);
1378 }
1379
1380 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1381 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1382 // The oops_do() phase does not overlap with monitor deflation
1383 // so no need to lock ObjectMonitors for the list traversal and
1384 // no need to update the ObjectMonitor's ref_count for this
1385 // ObjectMonitor* use.
1386 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1387 if (mid->object() != NULL) {
1388 f->do_oop((oop*)mid->object_addr());
1389 }
1390 }
1391 }
1392
1393
1394 // -----------------------------------------------------------------------------
1395 // ObjectMonitor Lifecycle
1396 // -----------------------
1397 // Inflation unlinks monitors from om_list_globals._free_list or a per-thread
1398 // free list and associates them with objects. Deflation -- which occurs at
1399 // STW-time or asynchronously -- disassociates idle monitors from objects.
1400 // Such scavenged monitors are returned to the om_list_globals._free_list.
1401 //
1402 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1403 //
1404 // Lifecycle:
1405 // -- unassigned and on the om_list_globals._free_list
1406 // -- unassigned and on a per-thread free list
1407 // -- assigned to an object. The object is inflated and the mark refers
1408 // to the ObjectMonitor.
1409
1410
1411 // Constraining monitor pool growth via MonitorBound ...
1412 //
1413 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1414 //
1415 // When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
1416 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but
1417 // the rate of scavenging is driven primarily by GC. As such, we can find
1418 // an inordinate number of monitors in circulation.
1419 // To avoid that scenario we can artificially induce a STW safepoint
1420 // if the pool appears to be growing past some reasonable bound.
1421 // Generally we favor time in space-time tradeoffs, but as there's no
1422 // natural back-pressure on the # of extant monitors we need to impose some
1423 // type of limit. Beware that if MonitorBound is set to too low a value
1424 // we could just loop. In addition, if MonitorBound is set to a low value
1425 // we'll incur more safepoints, which are harmful to performance.
1426 // See also: GuaranteedSafepointInterval
1427 //
1428 // When safepoint deflation is being used and MonitorBound is set, the
1429 // boundary applies to
1430 // (om_list_globals._population - om_list_globals._free_count)
1431 // i.e., if there are not enough ObjectMonitors on the global free list,
1432 // then a safepoint deflation is induced. Picking a good MonitorBound value
1433 // is non-trivial.
1434 //
1435 // When async deflation is being used:
1436 // The monitor pool is still grow-only. Async deflation is requested
1437 // by a safepoint's cleanup phase or by the ServiceThread at periodic
1438 // intervals when is_async_deflation_needed() returns true. In
1439 // addition to other policies that are checked, if there are not
1440 // enough ObjectMonitors on the global free list, then
1441 // is_async_deflation_needed() will return true. The ServiceThread
1442 // calls deflate_global_idle_monitors_using_JT() and also calls
1443 // deflate_per_thread_idle_monitors_using_JT() as needed.
1444
1445 static void InduceScavenge(Thread* self, const char * Whence) {
1446 assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
1447
1448 // Induce STW safepoint to trim monitors
1449 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1450 // More precisely, trigger a cleanup safepoint as the number
1451 // of active monitors passes the specified threshold.
1452 // TODO: assert thread state is reasonable
1453
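// Atomic::xchg() returns the previous flag value, so only the first caller
// to transition _forceMonitorScavenge from 0 to 1 pokes the VMThread;
// concurrent callers see 1 and return without re-requesting cleanup.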
1454 if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
1455 VMThread::check_for_forced_cleanup();
1456 }
1457 }
1458
1459 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
1460 // A large MAXPRIVATE value reduces both list lock contention
1461 // and list coherency traffic, but also tends to increase the
1462 // number of ObjectMonitors in circulation as well as the STW
1463 // scavenge costs. As usual, we lean toward time in space-time
1464 // tradeoffs.
1465 const int MAXPRIVATE = 1024;
1466 NoSafepointVerifier nsv;
1467
1468 stringStream ss;
1469 for (;;) {
1470 ObjectMonitor* m;
1471
1472 // 1: try to allocate from the thread's local om_free_list.
1473 // Threads will attempt to allocate first from their local list, then
1474 // from the global list, and only after those attempts fail will the
1475 // thread attempt to instantiate new monitors. Thread-local free lists
1476 // improve allocation latency, as well as reducing coherency traffic
1477 // on the shared global list.
1478 m = take_from_start_of_om_free_list(self);
1479 if (m != NULL) {
1480 guarantee(m->object() == NULL, "invariant");
1481 m->set_allocation_state(ObjectMonitor::New);
1482 prepend_to_om_in_use_list(self, m);
1483 return m;
1484 }
1485
1486 // 2: try to allocate from the global om_list_globals._free_list
1487 // If we're using thread-local free lists then try
1488 // to reprovision the caller's free list.
1489 if (Atomic::load(&om_list_globals._free_list) != NULL) {
1490 // Reprovision the thread's om_free_list.
1491 // Use bulk transfers to reduce the allocation rate and heat
1492 // on various locks.
1493 for (int i = self->om_free_provision; --i >= 0;) {
1494 ObjectMonitor* take = take_from_start_of_global_free_list();
1495 if (take == NULL) {
1496 break; // No more are available.
1497 }
1498 guarantee(take->object() == NULL, "invariant");
1499 if (AsyncDeflateIdleMonitors) {
1500 // We allowed 3 field values to linger during async deflation.
1501 // We clear header and restore ref_count here, but we leave
1502 // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
1503 // enter optimization can no longer race with async deflation
1504 // and reuse.
1505 take->set_header(markWord::zero());
1506 if (take->ref_count() < 0) {
1507 // Add back max_jint to restore the ref_count field to its
1508 // proper value.
1509 Atomic::add(&take->_ref_count, max_jint);
1510
1511 #ifdef ASSERT
1512 jint l_ref_count = take->ref_count();
1513 #endif
1514 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
1515 l_ref_count, take->ref_count());
1516 }
1517 }
1518 take->Recycle();
1519 // Since we're taking from the global free-list, take must be Free.
1520 // om_release() also sets the allocation state to Free because it
1521 // is called from other code paths.
1522 assert(take->is_free(), "invariant");
1523 om_release(self, take, false);
1524 }
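// Grow the reprovision amount geometrically (roughly 1.5x per round) so
// threads that allocate heavily quickly amortize trips to the global free
// list; MAXPRIVATE caps the growth below.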
1525 self->om_free_provision += 1 + (self->om_free_provision / 2);
1526 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1527
1528 if (!AsyncDeflateIdleMonitors &&
1529 is_MonitorBound_exceeded(Atomic::load(&om_list_globals._population) -
1530 Atomic::load(&om_list_globals._free_count))) {
1531 // Not enough ObjectMonitors on the global free list.
1532 // We can't safely induce a STW safepoint from om_alloc() as our thread
1533 // state may not be appropriate for such activities and callers may hold
1534 // naked oops, so instead we defer the action.
1535 InduceScavenge(self, "om_alloc");
1536 }
1537 continue;
1538 }
1539
1540 // 3: allocate a block of new ObjectMonitors
1541 // Both the local and global free lists are empty -- resort to malloc().
1542 // In the current implementation ObjectMonitors are TSM - immortal.
1543 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1544 // each ObjectMonitor to start at the beginning of a cache line,
1545 // so we use align_up().
1546 // A better solution would be to use C++ placement-new.
1547 // BEWARE: As it stands currently, we don't run the ctors!
1548 assert(_BLOCKSIZE > 1, "invariant");
1549 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1550 PaddedObjectMonitor* temp;
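// Over-allocate by one cache line so align_up() can locate an
// OM_CACHE_LINE_SIZE-aligned block start within the raw allocation.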
1551 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1552 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1553 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1554 (void)memset((void *) temp, 0, neededsize);
1555
1556 // Format the block.
1557 // Initialize the linked list: each monitor points to its successor,
1558 // forming the singly linked free list; the very first monitor
1559 // points to the next block, which forms the block list.
1560 // The trick of using the 1st element in the block as g_block_list
1561 // linkage should be reconsidered. A better implementation would
1562 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1563
1564 for (int i = 1; i < _BLOCKSIZE; i++) {
1565 temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
1566 assert(temp[i].is_free(), "invariant");
1567 }
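// Note: at i == _BLOCKSIZE - 1 the loop above briefly links the last
// element one past the end of the block; that link is immediately
// overwritten with NULL below.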
1568
1569 // terminate the last monitor as the end of list
1570 temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1571
1572 // Element [0] is reserved for global list linkage
1573 temp[0].set_object(CHAINMARKER);
1574
1575 // Consider carving out this thread's current request from the
1576 // block in hand. This avoids some lock traffic and redundant
1577 // list activity.
1578
1579 prepend_block_to_lists(temp);
1580 }
1581 }
1582
1583 // Place "m" on the caller's private per-thread om_free_list.
1584 // In practice there's no need to clamp or limit the number of
1585 // monitors on a thread's om_free_list as the only non-allocation time
1586 // we'll call om_release() is to return a monitor to the free list after
1587 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1588 // accumulate on a thread's free list.
1589 //
1590 // Key constraint: all ObjectMonitors on a thread's free list and the global
1591 // free list must have their object field set to null. This prevents the
1592 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1593 // -- from reclaiming them while we are trying to release them.
1594
1595 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1596 bool from_per_thread_alloc) {
1597 guarantee(m->header().value() == 0, "invariant");
1598 guarantee(m->object() == NULL, "invariant");
1599 NoSafepointVerifier nsv;
1600
1601 stringStream ss;
1602 guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1603 "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
1604 m->_recursions);
1605 m->set_allocation_state(ObjectMonitor::Free);
1606 // _next_om is used for both per-thread in-use and free lists so
1607 // we have to remove 'm' from the in-use list first (as needed).
1608 if (from_per_thread_alloc) {
1609 // Need to remove 'm' from om_in_use_list.
1610 ObjectMonitor* mid = NULL;
1611 ObjectMonitor* next = NULL;
1612
1613 // This list walk can race with another list walker or with async
1614 // deflation so we have to worry about an ObjectMonitor being
1615 // removed from this list while we are walking it.
1616
1617 // Lock the list head to avoid racing with another list walker
1618 // or with async deflation.
1619 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1620 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1621 }
1622 next = unmarked_next(mid);
1623 if (m == mid) {
1624 // First special case:
1625 // 'm' matches mid, is the list head and is locked. Switch the list
1626 // head to next which unlocks the list head, but leaves the extracted
1627 // mid locked:
1628 Atomic::store(&self->om_in_use_list, next);
1629 } else if (m == next) {
1630 // Second special case:
1631 // 'm' matches next after the list head and we already have the list
1632 // head locked so set mid to what we are extracting:
1633 mid = next;
1634 // Lock mid to prevent races with a list walker or an async
1635 // deflater thread that's ahead of us. The locked list head
1636 // prevents races from behind us.
1637 om_lock(mid);
1638 // Update next to what follows mid (if anything):
1639 next = unmarked_next(mid);
1640 // Switch next after the list head to new next which unlocks the
1641 // list head, but leaves the extracted mid locked:
1642 self->om_in_use_list->set_next_om(next);
1643 } else {
1644 // We have to search the list to find 'm'.
1645 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1646 " is too short.", p2i(self), p2i(self->om_in_use_list));
1647 // Our starting anchor is next after the list head which is the
1648 // last ObjectMonitor we checked:
1649 ObjectMonitor* anchor = next;
1650 // Lock anchor to prevent races with a list walker or an async
1651 // deflater thread that's ahead of us. The locked list head
1652 // prevents races from behind us.
1653 om_lock(anchor);
1654 om_unlock(mid); // Unlock the list head now that anchor is locked.
1655 while ((mid = unmarked_next(anchor)) != NULL) {
1656 if (m == mid) {
1657 // We found 'm' on the per-thread in-use list so extract it.
1658 // Update next to what follows mid (if anything):
1659 next = unmarked_next(mid);
1660 // Switch next after the anchor to new next which unlocks the
1661 // anchor, but leaves the extracted mid locked:
1662 anchor->set_next_om(next);
1663 break;
1664 } else {
1665 // Lock the next anchor to prevent races with a list walker
1666 // or an async deflater thread that's ahead of us. The locked
1667 // current anchor prevents races from behind us.
1668 om_lock(mid);
1669 // Unlock current anchor now that next anchor is locked:
1670 om_unlock(anchor);
1671 anchor = mid; // Advance to new anchor and try again.
1672 }
1673 }
1674 }
1675
1676 if (mid == NULL) {
1677 // Reached end of the list and didn't find 'm' so:
1678 fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1679 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1680 }
1681
1682 // At this point mid is disconnected from the in-use list so
1683 // its lock no longer has any effects on the in-use list.
1684 Atomic::dec(&self->om_in_use_count);
1685 // Unlock mid, but leave the next value for any lagging list
1686 // walkers. It will get cleaned up when mid is prepended to
1687 // the thread's free list:
1688 om_unlock(mid);
1689 }
1690
1691 prepend_to_om_free_list(self, m);
1692 guarantee(m->is_free(), "invariant");
1693 }
1694
1695 // Return ObjectMonitors on a moribund thread's free and in-use
1696 // lists to the appropriate global lists. The ObjectMonitors on the
1697 // per-thread in-use list may still be in use by other threads.
1698 //
1699 // We currently call om_flush() from Threads::remove() before the
1700 // thread has been excised from the thread list and is no longer a
1701 // mutator. This means that om_flush() cannot run concurrently with
1702 // a safepoint and interleave with deflate_idle_monitors(). In
1703 // particular, this ensures that the thread's in-use monitors are
1704 // scanned by a GC safepoint, either via Thread::oops_do() (before
1705 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1706 // om_flush() is called).
1707 //
1708 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
1709 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
1710 // run at the same time as om_flush() so we have to follow a careful
1711 // protocol to prevent list corruption.
1712
1713 void ObjectSynchronizer::om_flush(Thread* self) {
1714 // Process the per-thread in-use list first to be consistent.
1715 int in_use_count = 0;
1716 ObjectMonitor* in_use_list = NULL;
1717 ObjectMonitor* in_use_tail = NULL;
1718 NoSafepointVerifier nsv;
1719
1720 // This function can race with a list walker or with an async
1721 // deflater thread so we lock the list head to prevent confusion.
1722 // An async deflater thread checks to see if the target thread
1723 // is exiting, but if it has made it past that check before we
1724 // started exiting, then it is racing to get to the in-use list.
1725 if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1726 // At this point, we have locked the in-use list head so a racing
1727 // thread cannot come in after us. However, a racing thread could
1728 // be ahead of us; we'll detect that and delay to let it finish.
1729 //
1730 // The thread is going away, however the ObjectMonitors on the
1731 // om_in_use_list may still be in-use by other threads. Link
1732 // them to in_use_tail, which will be linked into the global
1733 // in-use list (om_list_globals._in_use_list) below.
1734 //
1735 // Account for the in-use list head before the loop since it is
1736 // already locked (by this thread):
1737 in_use_tail = in_use_list;
1738 in_use_count++;
1739 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
1740 if (is_locked(cur_om)) {
1741 // cur_om is locked so there must be a racing walker or async
1742 // deflater thread ahead of us so we'll give it a chance to finish.
1743 while (is_locked(cur_om)) {
1744 os::naked_short_sleep(1);
1745 }
1746 // Refetch the possibly changed next field and try again.
1747 cur_om = unmarked_next(in_use_tail);
1748 continue;
1749 }
1750 if (cur_om->is_free()) {
1751 // cur_om was deflated and the allocation state was changed
1752 // to Free while it was locked. We happened to see it just
1753 // after it was unlocked (and added to the free list).
1754 // Refetch the possibly changed next field and try again.
1755 cur_om = unmarked_next(in_use_tail);
1756 continue;
1757 }
1758 in_use_tail = cur_om;
1759 in_use_count++;
1760 cur_om = unmarked_next(cur_om);
1761 }
1762 guarantee(in_use_tail != NULL, "invariant");
1763 int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
1764 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: "
1765 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
1766 Atomic::store(&self->om_in_use_count, 0);
1767 // Clear the in-use list head (which also unlocks it):
1768 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1769 om_unlock(in_use_list);
1770 }
1771
1772 int free_count = 0;
1773 ObjectMonitor* free_list = NULL;
1774 ObjectMonitor* free_tail = NULL;
1775 // This function can race with a list walker thread so we lock the
1776 // list head to prevent confusion.
1777 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
1778 // At this point, we have locked the free list head so a racing
1779 // thread cannot come in after us. However, a racing thread could
1780 // be ahead of us; we'll detect that and delay to let it finish.
1781 //
1782 // The thread is going away. Set 'free_tail' to the last per-thread free
1783 // monitor which will be linked to om_list_globals._free_list below.
1784 //
1785 // Account for the free list head before the loop since it is
1786 // already locked (by this thread):
1787 free_tail = free_list;
1788 free_count++;
1789 for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
1790 if (is_locked(s)) {
1791 // s is locked so there must be a racing walker thread ahead
1792 // of us so we'll give it a chance to finish.
1793 while (is_locked(s)) {
1794 os::naked_short_sleep(1);
1795 }
1796 }
1797 free_tail = s;
1798 free_count++;
1799 guarantee(s->object() == NULL, "invariant");
1800 stringStream ss;
1801 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1802 }
1803 guarantee(free_tail != NULL, "invariant");
1804 int l_om_free_count = Atomic::load(&self->om_free_count);
1805 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1806 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
1807 Atomic::store(&self->om_free_count, 0);
1808 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1809 om_unlock(free_list);
1810 }
1811
1812 if (free_tail != NULL) {
1813 prepend_list_to_global_free_list(free_list, free_tail, free_count);
1814 }
1815
1816 if (in_use_tail != NULL) {
1817 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1818 }
1819
1820 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1821 LogStreamHandle(Info, monitorinflation) lsh_info;
1822 LogStream* ls = NULL;
1823 if (log_is_enabled(Debug, monitorinflation)) {
1824 ls = &lsh_debug;
1825 } else if ((free_count != 0 || in_use_count != 0) &&
1826 log_is_enabled(Info, monitorinflation)) {
1827 ls = &lsh_info;
1828 }
1829 if (ls != NULL) {
1830 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1831 ", in_use_count=%d" ", om_free_provision=%d",
1832 p2i(self), free_count, in_use_count, self->om_free_provision);
1833 }
1834 }
1835
1836 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1837 const oop obj,
1838 ObjectSynchronizer::InflateCause cause) {
1839 assert(event != NULL, "invariant");
1840 assert(event->should_commit(), "invariant");
1841 event->set_monitorClass(obj->klass());
1842 event->set_address((uintptr_t)(void*)obj);
1843 event->set_cause((u1)cause);
1844 event->commit();
1845 }
1846
1847 // Fast path code shared by multiple functions
1848 void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
1849 while (true) {
1850 markWord mark = obj->mark();
1851 if (mark.has_monitor()) {
1852 if (!omh_p->save_om_ptr(obj, mark)) {
1853 // Lost a race with async deflation so try again.
1854 assert(AsyncDeflateIdleMonitors, "sanity check");
1855 continue;
1856 }
1857 ObjectMonitor* monitor = omh_p->om_ptr();
1858 assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
1859 markWord dmw = monitor->header();
1860 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1861 return;
1862 }
1863 inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
1864 return;
1865 }
1866 }
1867
1868 void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
1869 oop object, const InflateCause cause) {
1870 // Inflate mutates the heap ...
1871 // Relaxing assertion for bug 6320749.
1872 assert(Universe::verify_in_progress() ||
1873 !SafepointSynchronize::is_at_safepoint(), "invariant");
1874
1875 EventJavaMonitorInflate event;
1876
1877 for (;;) {
1878 const markWord mark = object->mark();
1879 assert(!mark.has_bias_pattern(), "invariant");
1880
1881 // The mark can be in one of the following states:
1882 // * Inflated - just return
1883 // * Stack-locked - coerce it to inflated
1884 // * INFLATING - busy wait for conversion to complete
1885 // * Neutral - aggressively inflate the object.
1886 // * BIASED - Illegal. We should never see this
1887
1888 // CASE: inflated
1889 if (mark.has_monitor()) {
1890 if (!omh_p->save_om_ptr(object, mark)) {
1891 // Lost a race with async deflation so try again.
1892 assert(AsyncDeflateIdleMonitors, "sanity check");
1893 continue;
1894 }
1895 ObjectMonitor* inf = omh_p->om_ptr();
1896 markWord dmw = inf->header();
1897 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1898 assert(inf->object() == object, "invariant");
1899 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1900 return;
1901 }
1902
1903 // CASE: inflation in progress - inflating over a stack-lock.
1904 // Some other thread is converting from stack-locked to inflated.
1905 // Only that thread can complete inflation -- other threads must wait.
1906 // The INFLATING value is transient.
1907 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1908 // We could always eliminate polling by parking the thread on some auxiliary list.
1909 if (mark == markWord::INFLATING()) {
1910 read_stable_mark(object);
1911 continue;
1912 }
1913
1914 // CASE: stack-locked
1915 // Could be stack-locked either by this thread or by some other thread.
1916 //
1917 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1918 // to install INFLATING into the mark word. We originally installed INFLATING,
1919 // allocated the objectmonitor, and then finally STed the address of the
1920 // objectmonitor into the mark. This was correct, but artificially lengthened
1921 // the interval in which INFLATING appeared in the mark, thus increasing
1922 // the odds of inflation contention.
1923 //
1924 // We now use per-thread private objectmonitor free lists.
1925 // These lists are reprovisioned from the global free list outside the
1926 // critical INFLATING...ST interval. A thread can transfer
1927 // multiple objectmonitors en masse from the global free list to its local free list.
1928 // This reduces coherency traffic and lock contention on the global free list.
1929 // With such local free lists, it doesn't matter if the om_alloc() call appears
1930 // before or after the CAS(INFLATING) operation.
1931 // See the comments in om_alloc().
1932
1933 LogStreamHandle(Trace, monitorinflation) lsh;
1934
1935 if (mark.has_locker()) {
1936 ObjectMonitor* m = om_alloc(self);
1937 // Optimistically prepare the objectmonitor - anticipate successful CAS
1938 // We do this before the CAS in order to minimize the length of time
1939 // in which INFLATING appears in the mark.
1940 m->Recycle();
1941 m->_Responsible = NULL;
1942 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1943
1944 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1945 if (cmp != mark) {
1946 // om_release() will reset the allocation state from New to Free.
1947 om_release(self, m, true);
1948 continue; // Interference -- just retry
1949 }
1950
1951 // We've successfully installed INFLATING (0) into the mark-word.
1952 // This is the only case where 0 will appear in a mark-word.
1953 // Only the singular thread that successfully swings the mark-word
1954 // to 0 can perform (or more precisely, complete) inflation.
1955 //
1956 // Why do we CAS a 0 into the mark-word instead of just CASing the
1957 // mark-word from the stack-locked value directly to the new inflated state?
1958 // Consider what happens when a thread unlocks a stack-locked object.
1959 // It attempts to use CAS to swing the displaced header value from the
1960 // on-stack BasicLock back into the object header. Recall also that the
1961 // header value (hash code, etc) can reside in (a) the object header, or
1962 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1963 // header in an ObjectMonitor. The inflate() routine must copy the header
1964 // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1965 // the while preserving the hashCode stability invariants. If the owner
1966 // decides to release the lock while the value is 0, the unlock will fail
1967 // and control will eventually pass from slow_exit() to inflate. The owner
1968 // will then spin, waiting for the 0 value to disappear. Put another way,
1969 // the 0 causes the owner to stall if the owner happens to try to
1970 // drop the lock (restoring the header from the BasicLock to the object)
1971 // while inflation is in-progress. This protocol avoids races that
1972 // would otherwise permit hashCode values to change or "flicker" for an object.
1973 // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
1974 // 0 serves as a "BUSY" inflate-in-progress indicator.
1975
1976
1977 // fetch the displaced mark from the owner's stack.
1978 // The owner can't die or unwind past the lock while our INFLATING
1979 // object is in the mark. Furthermore the owner can't complete
1980 // an unlock on the object, either.
1981 markWord dmw = mark.displaced_mark_helper();
1982 // Catch if the object's header is not neutral (not locked and
1983 // not marked is what we care about here).
1984 ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1985
1986 // Setup monitor fields to proper values -- prepare the monitor
1987 m->set_header(dmw);
1988
1989 // Optimization: if the mark.locker stack address is associated
1990 // with this thread we could simply set m->_owner = self.
1991 // Note that a thread can inflate an object
1992 // that it has stack-locked -- as might happen in wait() -- directly
1993 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1994 if (AsyncDeflateIdleMonitors) {
1995 m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
1996 } else {
1997 m->set_owner_from(NULL, mark.locker());
1998 }
1999 m->set_object(object);
2000 // TODO-FIXME: assert BasicLock->dhw != 0.
2001
2002 omh_p->set_om_ptr(m);
2003
2004 // Must preserve store ordering. The monitor state must
2005 // be stable at the time of publishing the monitor address.
2006 guarantee(object->mark() == markWord::INFLATING(), "invariant");
2007 object->release_set_mark(markWord::encode(m));
2008
2009 // Once ObjectMonitor is configured and the object is associated
2010 // with the ObjectMonitor, it is safe to allow async deflation:
2011 assert(m->is_new(), "freshly allocated monitor must be new");
2012 m->set_allocation_state(ObjectMonitor::Old);
2013
2014 // Hopefully the performance counters are allocated on distinct cache lines
2015 // to avoid false sharing on MP systems ...
2016 OM_PERFDATA_OP(Inflations, inc());
2017 if (log_is_enabled(Trace, monitorinflation)) {
2018 ResourceMark rm(self);
2019 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
2020 INTPTR_FORMAT ", type='%s'", p2i(object),
2021 object->mark().value(), object->klass()->external_name());
2022 }
2023 if (event.should_commit()) {
2024 post_monitor_inflate_event(&event, object, cause);
2025 }
2026 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2027 return;
2028 }
2029
2030 // CASE: neutral
2031 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
2032 // If we know we're inflating for entry it's better to inflate by swinging a
2033 // pre-locked ObjectMonitor pointer into the object header. A successful
2034 // CAS inflates the object *and* confers ownership to the inflating thread.
2035 // In the current implementation we use a 2-step mechanism where we CAS()
2036 // to inflate and then CAS() again to try to swing _owner from NULL to self.
2037 // An inflateTry() method that we could call from enter() would be useful.
2038
2039 // Catch if the object's header is not neutral (not locked and
2040 // not marked is what we care about here).
2041 ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
2042 ObjectMonitor* m = om_alloc(self);
2043 // prepare m for installation - set monitor to initial state
2044 m->Recycle();
2045 m->set_header(mark);
2046 // If we leave _owner == DEFLATER_MARKER here, then the simple C2
2047 // ObjectMonitor enter optimization can no longer race with async
2048 // deflation and reuse.
2049 m->set_object(object);
2050 m->_Responsible = NULL;
2051 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
2052
2053 omh_p->set_om_ptr(m);
2054
2055 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2056 m->set_header(markWord::zero());
2057 m->set_object(NULL);
2058 m->Recycle();
2059 omh_p->set_om_ptr(NULL);
2060 // om_release() will reset the allocation state from New to Free.
2061 om_release(self, m, true);
2062 m = NULL;
2063 continue;
2064 // interference - the markword changed - just retry.
2065 // The state-transitions are one-way, so there's no chance of
2066 // live-lock -- "Inflated" is an absorbing state.
2067 }
2068
2069 // Once the ObjectMonitor is configured and object is associated
2070 // with the ObjectMonitor, it is safe to allow async deflation:
2071 assert(m->is_new(), "freshly allocated monitor must be new");
2072 m->set_allocation_state(ObjectMonitor::Old);
2073
2074 // Hopefully the performance counters are allocated on distinct
2075 // cache lines to avoid false sharing on MP systems ...
2076 OM_PERFDATA_OP(Inflations, inc());
2077 if (log_is_enabled(Trace, monitorinflation)) {
2078 ResourceMark rm(self);
2079 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
2080 INTPTR_FORMAT ", type='%s'", p2i(object),
2081 object->mark().value(), object->klass()->external_name());
2082 }
2083 if (event.should_commit()) {
2084 post_monitor_inflate_event(&event, object, cause);
2085 }
2086 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2087 return;
2088 }
2089 }
2090
2091
2092 // We maintain a list of in-use monitors for each thread.
2093 //
2094 // For safepoint based deflation:
2095 // deflate_thread_local_monitors() scans a single thread's in-use list, while
2096 // deflate_idle_monitors() scans only a global list of in-use monitors which
2097 // is populated only as a thread dies (see om_flush()).
2098 //
2099 // These operations are called at all safepoints, immediately after mutators
2100 // are stopped, but before any objects have moved. Collectively they traverse
2101 // the population of in-use monitors, deflating where possible. The scavenged
2102 // monitors are returned to the global monitor free list.
2103 //
2104 // Beware that we scavenge at *every* stop-the-world point. Having a large
2105 // number of monitors in-use could negatively impact performance. We also want
2106 // to minimize the total # of monitors in circulation, as they incur a small
2107 // footprint penalty.
2108 //
2109 // Perversely, the heap size -- and thus the STW safepoint rate --
2110 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
2111 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
2112 // This is an unfortunate aspect of this design.
2113 //
2114 // For async deflation:
2115 // If a special deflation request is made, then the safepoint based
2116 // deflation mechanism is used. Otherwise, an async deflation request
2117 // is registered with the ServiceThread and it is notified.
2118
2119 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
2120 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2121
2122 // The per-thread in-use lists are handled in
2123 // ParallelSPCleanupThreadClosure::do_thread().
2124
2125 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
2126 // Use the older mechanism for the global in-use list or if a
2127 // special deflation has been requested before the safepoint.
2128 ObjectSynchronizer::deflate_idle_monitors(counters);
2129 return;
2130 }
2131
2132 log_debug(monitorinflation)("requesting async deflation of idle monitors.");
2133 // Request deflation of idle monitors by the ServiceThread:
2134 set_is_async_deflation_requested(true);
2135 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
2136 ml.notify_all();
2137
2138 if (log_is_enabled(Debug, monitorinflation)) {
2139 // exit_globals()'s call to audit_and_print_stats() is done
2140 // at the Info level and not at a safepoint.
2141 // For safepoint based deflation, audit_and_print_stats() is called
2142 // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
2143 // Debug level at a safepoint.
2144 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2145 }
2146 }
2147
2148 // Deflate a single monitor if not in-use
2149 // Return true if deflated, false if in-use
2150 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
2151 ObjectMonitor** free_head_p,
2152 ObjectMonitor** free_tail_p) {
2153 bool deflated;
2154 // Normal case ... The monitor is associated with obj.
2155 const markWord mark = obj->mark();
2156 guarantee(mark == markWord::encode(mid), "should match: mark="
2157 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
2158 markWord::encode(mid).value());
2159 // Make sure that mark.monitor() and markWord::encode() agree:
2160 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
2161 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
2162 const markWord dmw = mid->header();
2163 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2164
2165 if (mid->is_busy() || mid->ref_count() != 0) {
2166 // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2167 // is in use so no deflation.
2168 deflated = false;
2169 } else {
2170 // Deflate the monitor if it is no longer being used
2171 // It's idle - scavenge and return to the global free list
2172 // plain old deflation ...
2173 if (log_is_enabled(Trace, monitorinflation)) {
2174 ResourceMark rm;
2175 log_trace(monitorinflation)("deflate_monitor: "
2176 "object=" INTPTR_FORMAT ", mark="
2177 INTPTR_FORMAT ", type='%s'", p2i(obj),
2178 mark.value(), obj->klass()->external_name());
2179 }
2180
2181 // Restore the header back to obj
2182 obj->release_set_mark(dmw);
2183 if (AsyncDeflateIdleMonitors) {
2184 // clear() expects the owner field to be NULL and we won't race
2185 // with the simple C2 ObjectMonitor enter optimization since
2186 // we're at a safepoint. DEFLATER_MARKER is the only non-NULL
2187 // value we should see here.
2188 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2189 }
2190 mid->clear();
2191
2192 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2193 p2i(mid->object()));
2194 assert(mid->is_free(), "invariant");
2195
2196 // Move the deflated ObjectMonitor to the working free list
2197 // defined by free_head_p and free_tail_p.
2198 if (*free_head_p == NULL) *free_head_p = mid;
2199 if (*free_tail_p != NULL) {
2200 // We append to the list so the caller can use mid->_next_om
2201 // to fix the linkages in its context.
2202 ObjectMonitor* prevtail = *free_tail_p;
2203 // Should have been cleaned up by the caller:
2204 // Note: Should not have to lock prevtail here since we're at a
2205 // safepoint and ObjectMonitors on the local free list should
2206 // not be accessed in parallel.
2207 #ifdef ASSERT
2208 ObjectMonitor* l_next_om = prevtail->next_om();
2209 #endif
2210 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2211 prevtail->set_next_om(mid);
2212 }
2213 *free_tail_p = mid;
2214 // At this point, mid->_next_om still refers to its current
2215 // value and another ObjectMonitor's _next_om field still
2216 // refers to this ObjectMonitor. Those linkages have to be
2217 // cleaned up by the caller who has the complete context.
2218 deflated = true;
2219 }
2220 return deflated;
2221 }
2222
2223 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
2224 // Returns true if it was deflated and false otherwise.
2225 //
2226 // The async deflation protocol sets owner to DEFLATER_MARKER and
2227 // makes ref_count negative as signals to contending threads that
2228 // an async deflation is in progress. There are a number of checks
2229 // as part of the protocol to make sure that the calling thread has
2230 // not lost the race to a contending thread or to a thread that just
2231 // wants to use the ObjectMonitor*.
2232 //
2233 // The ObjectMonitor has been successfully async deflated when:
2234 // (owner == DEFLATER_MARKER && ref_count < 0)
2235 // Contending threads or ObjectMonitor* using threads that see those
2236 // values know to retry their operation.
2237 //
2238 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
2239 ObjectMonitor** free_head_p,
2240 ObjectMonitor** free_tail_p) {
2241 assert(AsyncDeflateIdleMonitors, "sanity check");
2242 assert(Thread::current()->is_Java_thread(), "precondition");
2243 // A newly allocated ObjectMonitor should not be seen here so we
2244 // avoid an endless inflate/deflate cycle.
2245 assert(mid->is_old(), "must be old: allocation_state=%d",
2246 (int) mid->allocation_state());
2247
2248 if (mid->is_busy() || mid->ref_count() != 0) {
2249 // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2250 // is in use so no deflation.
2251 return false;
2252 }
2253
2254 if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) == NULL) {
2255 // ObjectMonitor is not owned by another thread. Our setting
2256 // owner to DEFLATER_MARKER forces any contending thread through
2257 // the slow path. This is just the first part of the async
2258 // deflation dance.
2259
2260 if (mid->_contentions != 0 || mid->_waiters != 0) {
2261 // Another thread has raced to enter the ObjectMonitor after
2262 // mid->is_busy() above or has already entered and waited on
2263 // it which makes it busy so no deflation. Restore owner to
2264 // NULL if it is still DEFLATER_MARKER.
2265 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2266 return false;
2267 }
2268
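// Atomic::cmpxchg() returns the value it found: 0 means we atomically
// swung ref_count from 0 to -max_jint, i.e., no thread held an
// ObjectMonitor* reference at that instant.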
2269 if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) == 0) {
2270 // Make ref_count negative to force any contending threads or
2271 // ObjectMonitor* using threads to retry. This is the second
2272 // part of the async deflation dance.
2273
2274 if (mid->owner_is_DEFLATER_MARKER()) {
2275 // If owner is still DEFLATER_MARKER, then we have successfully
2276 // signaled any contending threads to retry. If it is not, then we
2277 // have lost the race to an entering thread and the ObjectMonitor
2278 // is now busy. This is the third and final part of the async
2279 // deflation dance.
2280 // Note: This owner check solves the ABA problem with ref_count
2281 // where another thread acquired the ObjectMonitor, finished
2282 // using it and restored the ref_count to zero.
2283
2284 // Sanity checks for the races:
2285 guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
2286 mid->_contentions);
2287 guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
2288 guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
2289 INTPTR_FORMAT, p2i(mid->_cxq));
2290 guarantee(mid->_EntryList == NULL,
2291 "must be no entering threads: EntryList=" INTPTR_FORMAT,
2292 p2i(mid->_EntryList));
2293
2294 const oop obj = (oop) mid->object();
2295 if (log_is_enabled(Trace, monitorinflation)) {
2296 ResourceMark rm;
2297 log_trace(monitorinflation)("deflate_monitor_using_JT: "
2298 "object=" INTPTR_FORMAT ", mark="
2299 INTPTR_FORMAT ", type='%s'",
2300 p2i(obj), obj->mark().value(),
2301 obj->klass()->external_name());
2302 }
2303
2304 // Install the old mark word if nobody else has already done it.
2305 mid->install_displaced_markword_in_object(obj);
2306 mid->clear_using_JT();
2307
2308 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2309 p2i(mid->object()));
2310 assert(mid->is_free(), "must be free: allocation_state=%d",
2311 (int) mid->allocation_state());
2312
2313 // Move the deflated ObjectMonitor to the working free list
2314 // defined by free_head_p and free_tail_p. No races on this list
2315 // so no need for load_acquire() or store_release().
2316 if (*free_head_p == NULL) {
2317 // First one on the list.
2318 *free_head_p = mid;
2319 }
2320 if (*free_tail_p != NULL) {
2321 // We append to the list so the caller can use mid->_next_om
2322 // to fix the linkages in its context.
2323 ObjectMonitor* prevtail = *free_tail_p;
2324 // Should have been cleaned up by the caller:
2325 om_lock(prevtail);
2326 #ifdef ASSERT
2327 ObjectMonitor* l_next_om = unmarked_next(prevtail);
2328 #endif
2329 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2330 prevtail->set_next_om(mid); // prevtail now points to mid (and is unlocked)
2331 }
2332 *free_tail_p = mid;
2333
2334 // At this point, mid->_next_om still refers to its current
2335 // value and another ObjectMonitor's _next_om field still
2336 // refers to this ObjectMonitor. Those linkages have to be
2337 // cleaned up by the caller who has the complete context.
2338
2339 // We leave owner == DEFLATER_MARKER and ref_count < 0
2340 // to force any racing threads to retry.
2341 return true; // Success, ObjectMonitor has been deflated.
2342 }
2343
2344 // The owner was changed from DEFLATER_MARKER so we lost the
2345 // race since the ObjectMonitor is now busy.
2346
2347 // Add back max_jint to restore the ref_count field to its
2348 // proper value (which may not be what we saw above):
2349 Atomic::add(&mid->_ref_count, max_jint);
2350
2351 #ifdef ASSERT
2352 jint l_ref_count = mid->ref_count();
2353 #endif
2354 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
2355 l_ref_count, mid->ref_count());
2356 return false;
2357 }
2358
2359 // The ref_count was no longer 0 so we lost the race since the
2360 // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2361 // Restore owner to NULL if it is still DEFLATER_MARKER:
2362 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2363 }
2364
2365 // The owner field is no longer NULL so we lost the race since the
2366 // ObjectMonitor is now busy.
2367 return false;
2368 }
2369
2370 // Walk a given monitor list, and deflate idle monitors.
2371 // The given list could be a per-thread list or a global list.
2372 //
2373 // In the case of parallel processing of thread local monitor lists,
2374 // work is done by Threads::parallel_threads_do() which ensures that
2375 // each Java thread is processed by exactly one worker thread, and
2376 // thus avoids conflicts that would arise if worker threads were to
2377 // process the same monitor lists concurrently.
2378 //
2379 // See also ParallelSPCleanupTask and
2380 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2381 // Threads::parallel_java_threads_do() in thread.cpp.
2382 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
2383 int* count_p,
2384 ObjectMonitor** free_head_p,
2385 ObjectMonitor** free_tail_p) {
2386 ObjectMonitor* cur_mid_in_use = NULL;
2387 ObjectMonitor* mid = NULL;
2388 ObjectMonitor* next = NULL;
2389 int deflated_count = 0;
2390
2391 // This list walk executes at a safepoint and does not race with any
2392 // other list walkers.
2393
2394 for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
2395 next = unmarked_next(mid);
2396 oop obj = (oop) mid->object();
2397 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2398 // Deflation succeeded and already updated free_head_p and
2399 // free_tail_p as needed. Finish the move to the local free list
2400 // by unlinking mid from the global or per-thread in-use list.
2401 if (cur_mid_in_use == NULL) {
2402 // mid is the list head so switch the list head to next:
2403 Atomic::store(list_p, next);
2404 } else {
2405 // Switch cur_mid_in_use's next field to next:
2406 cur_mid_in_use->set_next_om(next);
2407 }
2408 // At this point mid is disconnected from the in-use list.
2409 deflated_count++;
2410 Atomic::dec(count_p);
2411 // mid is current tail in the free_head_p list so NULL terminate it:
2412 mid->set_next_om(NULL);
2413 } else {
2414 cur_mid_in_use = mid;
2415 }
2416 }
2417 return deflated_count;
2418 }
2419
2420 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2421 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2422 // list could be a per-thread in-use list or the global in-use list.
2423 // If a safepoint has started, then we save state via saved_mid_in_use_p
2424 // and return to the caller to honor the safepoint.
2425 //
2426 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2427 int* count_p,
2428 ObjectMonitor** free_head_p,
2429 ObjectMonitor** free_tail_p,
2430 ObjectMonitor** saved_mid_in_use_p) {
2431 assert(AsyncDeflateIdleMonitors, "sanity check");
2432 JavaThread* self = JavaThread::current();
2433
2434 ObjectMonitor* cur_mid_in_use = NULL;
2435 ObjectMonitor* mid = NULL;
2436 ObjectMonitor* next = NULL;
2437 ObjectMonitor* next_next = NULL;
2438 int deflated_count = 0;
2439 NoSafepointVerifier nsv;
2440
2441 // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
2442 // protocol because om_release() can do list deletions in parallel;
2443 // this also prevents races with a list walker thread. We also
2444 // lock-next-next-as-we-go to prevent an om_flush() that is behind
2445 // this thread from passing us.
2446 if (*saved_mid_in_use_p == NULL) {
2447 // No saved state so start at the beginning.
2448 // Lock the list head so we can possibly deflate it:
2449 if ((mid = get_list_head_locked(list_p)) == NULL) {
2450 return 0; // The list is empty so nothing to deflate.
2451 }
2452 next = unmarked_next(mid);
2453 } else {
2454 // We're restarting after a safepoint so restore the necessary state
2455 // before we resume.
2456 cur_mid_in_use = *saved_mid_in_use_p;
2457 // Lock cur_mid_in_use so we can possibly update its
2458 // next field to extract a deflated ObjectMonitor.
2459 om_lock(cur_mid_in_use);
2460 mid = unmarked_next(cur_mid_in_use);
2461 if (mid == NULL) {
2462 om_unlock(cur_mid_in_use);
2463 *saved_mid_in_use_p = NULL;
2464 return 0; // The remainder is empty so nothing more to deflate.
2465 }
2466 // Lock mid so we can possibly deflate it:
2467 om_lock(mid);
2468 next = unmarked_next(mid);
2469 }
2470
2471 while (true) {
2472 // The current mid's next field is marked at this point. If we have
2473 // a cur_mid_in_use, then its next field is also marked at this point.
2474
2475 if (next != NULL) {
2476 // We lock next so that an om_flush() thread that is behind us
2477 // cannot pass us when we unlock the current mid.
2478 om_lock(next);
2479 next_next = unmarked_next(next);
2480 }
2481
2482 // Only try to deflate if there is an associated Java object and if
2483 // mid is old (is not newly allocated and is not newly freed).
2484 if (mid->object() != NULL && mid->is_old() &&
2485 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2486 // Deflation succeeded and already updated free_head_p and
2487 // free_tail_p as needed. Finish the move to the local free list
2488 // by unlinking mid from the global or per-thread in-use list.
2489 if (cur_mid_in_use == NULL) {
2490 // mid is the list head and it is locked. Switch the list head
2491 // to next which is also locked (if not NULL) and also leave
2492 // mid locked:
2493 Atomic::store(list_p, next);
2494 } else {
2495 ObjectMonitor* locked_next = mark_om_ptr(next);
2496 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2497 // next field to locked_next and also leave mid locked:
2498 cur_mid_in_use->set_next_om(locked_next);
2499 }
2500 // At this point mid is disconnected from the in-use list so
2501 // its lock no longer has any effect on the in-use list.
2502 deflated_count++;
2503 Atomic::dec(count_p);
2504 // mid is current tail in the free_head_p list so NULL terminate it
2505 // (which also unlocks it):
2506 mid->set_next_om(NULL);
2507
2508 // All the list management is done so move on to the next one:
2509 mid = next; // mid keeps non-NULL next's locked state
2510 next = next_next;
2511 } else {
2512 // mid is considered in-use if it does not have an associated
2513 // Java object or mid is not old or deflation did not succeed.
2514 // A mid->is_new() node can be seen here when it is freshly
2515 // returned by om_alloc() (and skips the deflation code path).
2516 // A mid->is_old() node can be seen here when deflation failed.
2517 // A mid->is_free() node can be seen here when a fresh node from
2518 // om_alloc() is released by om_release() due to losing the race
2519 // in inflate().
2520
2521 // All the list management is done so move on to the next one:
2522 if (cur_mid_in_use != NULL) {
2523 om_unlock(cur_mid_in_use);
2524 }
2525 // The next cur_mid_in_use keeps mid's lock state so
2526 // that it is stable for a possible next field change. It
2527 // cannot be modified by om_release() while it is locked.
2528 cur_mid_in_use = mid;
2529 mid = next; // mid keeps non-NULL next's locked state
2530 next = next_next;
2531
2532 if (SafepointMechanism::should_block(self) &&
2533 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
2534 // If a safepoint has started and cur_mid_in_use is not the list
2535 // head and is old, then it is safe to use as saved state. Return
2536 // to the caller before blocking.
2537 *saved_mid_in_use_p = cur_mid_in_use;
2538 om_unlock(cur_mid_in_use);
2539 if (mid != NULL) {
2540 om_unlock(mid);
2541 }
2542 return deflated_count;
2543 }
2544 }
2545 if (mid == NULL) {
2546 if (cur_mid_in_use != NULL) {
2547 om_unlock(cur_mid_in_use);
2548 }
2549 break; // Reached end of the list so nothing more to deflate.
2550 }
2551
2552 // The current mid's next field is locked at this point. If we have
2553 // a cur_mid_in_use, then it is also locked at this point.
2554 }
2555 // We finished the list without a safepoint starting so there's
2556 // no need to save state.
2557 *saved_mid_in_use_p = NULL;
2558 return deflated_count;
2559 }
2560
2561 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2562 counters->n_in_use = 0; // currently associated with objects
2563 counters->n_in_circulation = 0; // extant
2564 counters->n_scavenged = 0; // reclaimed (global and per-thread)
2565 counters->per_thread_scavenged = 0; // per-thread scavenge total
2566 counters->per_thread_times = 0.0; // per-thread scavenge times
2567 }
2568
2569 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2570 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2571
2572 if (AsyncDeflateIdleMonitors) {
2573 // Nothing to do when global idle ObjectMonitors are deflated using
2574 // a JavaThread unless a special deflation has been requested.
2575 if (!is_special_deflation_requested()) {
2576 return;
2577 }
2578 }
2579
2580 bool deflated = false;
2581
2582 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2583 ObjectMonitor* free_tail_p = NULL;
2584 elapsedTimer timer;
2585
2586 if (log_is_enabled(Info, monitorinflation)) {
2587 timer.start();
2588 }
2589
2590 // Note: the thread-local monitors lists get deflated in
2591 // a separate pass. See deflate_thread_local_monitors().
2592
2593 // For moribund threads, scan om_list_globals._in_use_list
2594 int deflated_count = 0;
2595 if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
2596 // Update n_in_circulation before om_list_globals._in_use_count is
2597 // updated by deflation.
2598 Atomic::add(&counters->n_in_circulation,
2599 Atomic::load(&om_list_globals._in_use_count));
2600
2601 deflated_count = deflate_monitor_list(&om_list_globals._in_use_list,
2602 &om_list_globals._in_use_count,
2603 &free_head_p, &free_tail_p);
2604 Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count));
2605 }
2606
2607 if (free_head_p != NULL) {
2608 // Move the deflated ObjectMonitors back to the global free list.
2609 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2610 #ifdef ASSERT
2611 ObjectMonitor* l_next_om = free_tail_p->next_om();
2612 #endif
2613 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2614 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
2615 Atomic::add(&counters->n_scavenged, deflated_count);
2616 }
2617 timer.stop();
2618
2619 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2620 LogStreamHandle(Info, monitorinflation) lsh_info;
2621 LogStream* ls = NULL;
2622 if (log_is_enabled(Debug, monitorinflation)) {
2623 ls = &lsh_debug;
2624 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2625 ls = &lsh_info;
2626 }
2627 if (ls != NULL) {
2628 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2629 }
2630 }
2631
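// A deliberately empty handshake closure. Executing a handshake with it
// forces every JavaThread (or, when handshakes are unavailable, a whole
// safepoint) through a state transition, after which no JavaThread can
// still be referencing a deflated ObjectMonitor that is parked on
// om_list_globals._wait_list, so those monitors can then be moved to
// the global free list.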
2632 class HandshakeForDeflation : public HandshakeClosure {
2633 public:
2634 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
2635
2636 void do_thread(Thread* thread) {
2637 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
2638 INTPTR_FORMAT, p2i(thread));
2639 }
2640 };
2641
2642 void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
2643 assert(AsyncDeflateIdleMonitors, "sanity check");
2644
2645 // Deflate any global idle monitors.
2646 deflate_global_idle_monitors_using_JT();
2647
2648 int count = 0;
2649 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2650 if (Atomic::load(&jt->om_in_use_count) > 0 && !jt->is_exiting()) {
2651 // This JavaThread is using ObjectMonitors, so deflate any that
2652 // are idle. Skip an exiting JavaThread so that we do not race
2653 // with ObjectSynchronizer::om_flush().
2654 deflate_per_thread_idle_monitors_using_JT(jt);
2655 count++;
2656 }
2657 }
2658 if (count > 0) {
2659 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
2660 }
2661
2662 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
2663 "global_free_count=%d, global_wait_count=%d",
2664 Atomic::load(&om_list_globals._population),
2665 Atomic::load(&om_list_globals._in_use_count),
2666 Atomic::load(&om_list_globals._free_count),
2667 Atomic::load(&om_list_globals._wait_count));
2668
2669 // The ServiceThread's async deflation request has been processed.
2670 set_is_async_deflation_requested(false);
2671
2672 if (HandshakeAfterDeflateIdleMonitors &&
2673 Atomic::load(&om_list_globals._wait_count) > 0) {
2674 // There are deflated ObjectMonitors waiting for a handshake
2675 // (or a safepoint) for safety.
2676
2677 ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
2678 ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
2679 int count = Atomic::load(&om_list_globals._wait_count);
2680 Atomic::store(&om_list_globals._wait_count, 0);
2681 Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
2682
2683 // Find the tail for prepend_list_to_common(). No need to mark
2684 // ObjectMonitors for this list walk since only the deflater
2685 // thread manages the wait list.
2686 int l_count = 0;
2687 ObjectMonitor* tail = NULL;
2688 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2689 tail = n;
2690 l_count++;
2691 }
2692 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2693
2694 // Will execute a safepoint if !ThreadLocalHandshakes:
2695 HandshakeForDeflation hfd_hc;
2696 Handshake::execute(&hfd_hc);
2697
2698 prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
2699 &om_list_globals._free_count);
2700
2701 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
2702 }
2703 }
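
// Caller sketch (an assumption, not verified against the rest of this
// file): deflate_idle_monitors_using_JT() is expected to run on the
// ServiceThread once an async deflation request is noticed, e.g.:
//
//   if (ObjectSynchronizer::is_async_deflation_requested()) {
//     ObjectSynchronizer::deflate_idle_monitors_using_JT();
//   }
//
// is_async_deflation_requested() is assumed here to be the query that
// matches the set_is_async_deflation_requested(false) call above.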
2704
2705 // Deflate global idle ObjectMonitors using a JavaThread.
2706 //
2707 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2708 assert(AsyncDeflateIdleMonitors, "sanity check");
2709 assert(Thread::current()->is_Java_thread(), "precondition");
2710 JavaThread* self = JavaThread::current();
2711
2712 deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2713 }
2714
2715 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
2716 //
2717 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
2718 assert(AsyncDeflateIdleMonitors, "sanity check");
2719 assert(Thread::current()->is_Java_thread(), "precondition");
2720
2721 deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
2722 }
2723
2724 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2725 //
2726 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
2727 JavaThread* self = JavaThread::current();
2728
2729 int deflated_count = 0;
2730 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors
2731 ObjectMonitor* free_tail_p = NULL;
2732 ObjectMonitor* saved_mid_in_use_p = NULL;
2733 elapsedTimer timer;
2734
2735 if (log_is_enabled(Info, monitorinflation)) {
2736 timer.start();
2737 }
2738
2739 if (is_global) {
2740 OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count)));
2741 } else {
2742 OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count)));
2743 }
2744
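// Deflate the chosen in-use list in one or more passes. If a safepoint
// starts while the list is being walked, deflate_monitor_list_using_JT()
// returns early with saved_mid_in_use_p set; we block for the safepoint
// below and then resume the walk from the saved position rather than
// restarting from the list head.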
2745 do {
2746 int local_deflated_count;
2747 if (is_global) {
2748 local_deflated_count =
2749 deflate_monitor_list_using_JT(&om_list_globals._in_use_list,
2750 &om_list_globals._in_use_count,
2751 &free_head_p, &free_tail_p,
2752 &saved_mid_in_use_p);
2753 } else {
2754 local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2755 }
2756 deflated_count += local_deflated_count;
2757
2758 if (free_head_p != NULL) {
2759 // Move the deflated ObjectMonitors to the global free list.
2760 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2761 // Note: The target thread can be doing an om_alloc() that
2762 // is trying to prepend an ObjectMonitor onto its in-use list
2763 // at the same time that we have deflated the current in-use
2764 // list head and put it on the local free list. prepend_to_common()
2765 // will detect the race and retry, which avoids list corruption,
2766 // but the next field in free_tail_p can flicker to marked
2767 // and then unmarked while prepend_to_common() is sorting it
2768 // all out.
2769 #ifdef ASSERT
2770 ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
2771 #endif
2772 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2773
2774 if (HandshakeAfterDeflateIdleMonitors) {
2775 prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
2776 } else {
2777 prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count);
2778 }
2779
2780 OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
2781 }
2782
2783 if (saved_mid_in_use_p != NULL) {
2784 // deflate_monitor_list_using_JT() detected a safepoint starting.
2785 timer.stop();
2786 {
2787 if (is_global) {
2788 log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
2789 } else {
2790 log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
2791 }
2792 assert(SafepointMechanism::should_block(self), "sanity check");
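// ThreadBlockInVM transitions this thread to _thread_blocked so the
// pending safepoint can proceed; when the scope exits, the safepoint
// has completed and deflation can resume.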
2793 ThreadBlockInVM blocker(self);
2794 }
2795 // Prepare for another loop after the safepoint.
2796 free_head_p = NULL;
2797 free_tail_p = NULL;
2798 if (log_is_enabled(Info, monitorinflation)) {
2799 timer.start();
2800 }
2801 }
2802 } while (saved_mid_in_use_p != NULL);
2803 timer.stop();
2804
2805 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2806 LogStreamHandle(Info, monitorinflation) lsh_info;
2807 LogStream* ls = NULL;
2808 if (log_is_enabled(Debug, monitorinflation)) {
2809 ls = &lsh_debug;
2810 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2811 ls = &lsh_info;
2812 }
2813 if (ls != NULL) {
2814 if (is_global) {
2815 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2816 } else {
2817 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
2818 }
2819 }
2820 }
2821
2822 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2823 // Report the cumulative time for deflating each thread's idle
2824 // monitors. Note: if the work is split among more than one
2825 // worker thread, then the reported time will likely be more
2826 // than a beginning-to-end measurement of the phase.
2827 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2828
2829 bool needs_special_deflation = is_special_deflation_requested();
2830 if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
2831 // When idle ObjectMonitors are deflated using a JavaThread, there is
2832 // nothing to do here unless a special deflation has been requested.
2833 return;
2834 }
2835
2836 if (log_is_enabled(Debug, monitorinflation)) {
2837 // exit_globals()'s call to audit_and_print_stats() is done
2838 // at the Info level and not at a safepoint.
2839 // For async deflation, audit_and_print_stats() is called in
2840 // ObjectSynchronizer::do_safepoint_work() at the Debug level
2841 // at a safepoint.
2842 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2843 } else if (log_is_enabled(Info, monitorinflation)) {
2844 log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
2845 "global_free_count=%d, global_wait_count=%d",
2846 Atomic::load(&om_list_globals._population),
2847 Atomic::load(&om_list_globals._in_use_count),
2848 Atomic::load(&om_list_globals._free_count),
2849 Atomic::load(&om_list_globals._wait_count));
2850 }
2851
2852 Atomic::store(&_forceMonitorScavenge, 0); // Reset
2853
2854 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2855 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2856
2857 GVars.stw_random = os::random();
2858 GVars.stw_cycle++;
2859
2860 if (needs_special_deflation) {
2861 set_is_special_deflation_requested(false); // special deflation is done
2862 }
2863 }
2864
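// Deflate the given thread's idle monitors at a safepoint and move any
// scavenged ObjectMonitors to the global free list. With
// AsyncDeflateIdleMonitors enabled, this is only done when a special
// deflation has been requested.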
2865 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2866 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2867
2868 if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
2869 // Nothing to do if a special deflation has NOT been requested.
2870 return;
2871 }
2872
2873 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2874 ObjectMonitor* free_tail_p = NULL;
2875 elapsedTimer timer;
2876
2877 if (log_is_enabled(Info, safepoint, cleanup) ||
2878 log_is_enabled(Info, monitorinflation)) {
2879 timer.start();
2880 }
2881
2882 // Update n_in_circulation before om_in_use_count is updated by deflation.
2883 Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));
2884
2885 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2886 Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));
2887
2888 if (free_head_p != NULL) {
2889 // Move the deflated ObjectMonitors back to the global free list.
2890 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2891 #ifdef ASSERT
2892 ObjectMonitor* l_next_om = free_tail_p->next_om();
3024 if (Atomic::load(&om_list_globals._population) == chk_om_population) {
3025 ls->print_cr("global_population=%d equals chk_om_population=%d",
3026 Atomic::load(&om_list_globals._population), chk_om_population);
3027 } else {
3028 // With fine-grained locks on the monitor lists, it is possible for
3029 // log_monitor_list_counts() to return a value that doesn't match
3030 // om_list_globals._population. So far a higher value has been
3031 // seen in testing, so something is being double counted by
3032 // log_monitor_list_counts().
3033 ls->print_cr("WARNING: global_population=%d is not equal to "
3034 "chk_om_population=%d",
3035 Atomic::load(&om_list_globals._population), chk_om_population);
3036 }
3037
3038 // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
3039 chk_global_in_use_list_and_count(ls, &error_cnt);
3040
3041 // Check om_list_globals._free_list and om_list_globals._free_count:
3042 chk_global_free_list_and_count(ls, &error_cnt);
3043
3044 if (HandshakeAfterDeflateIdleMonitors) {
3045 // Check om_list_globals._wait_list and om_list_globals._wait_count:
3046 chk_global_wait_list_and_count(ls, &error_cnt);
3047 }
3048
3049 ls->print_cr("Checking per-thread lists:");
3050
3051 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3052 // Check om_in_use_list and om_in_use_count:
3053 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
3054
3055 // Check om_free_list and om_free_count:
3056 chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
3057 }
3058
3059 if (error_cnt == 0) {
3060 ls->print_cr("No errors found in monitor list checks.");
3061 } else {
3062 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
3063 }
3064
3065 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
3066 (!on_exit && log_is_enabled(Trace, monitorinflation))) {
3067 // When exiting this log output is at the Info level. When called
3068 // at a safepoint, this log output is at the Trace level since
3069 // there can be a lot of it.
3070 log_in_use_monitor_details(ls);
3071 }
3072
3073 ls->flush();
3074
3075 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
3076 }
3077
3078 // Check a free monitor entry; log any errors.
3079 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
3080 outputStream * out, int *error_cnt_p) {
3081 stringStream ss;
3082 if (n->is_busy()) {
3083 if (jt != NULL) {
3084 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3085 ": free per-thread monitor must not be busy: %s", p2i(jt),
3086 p2i(n), n->is_busy_to_string(&ss));
3087 } else {
3088 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3089 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
3090 }
3091 *error_cnt_p = *error_cnt_p + 1;
3092 }
3093 if (n->header().value() != 0) {
3094 if (jt != NULL) {
3095 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3096 ": free per-thread monitor must have NULL _header "
3097 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
3098 n->header().value());
3099 *error_cnt_p = *error_cnt_p + 1;
3100 } else if (!AsyncDeflateIdleMonitors) {
3101 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3102 "must have NULL _header field: _header=" INTPTR_FORMAT,
3103 p2i(n), n->header().value());
3104 *error_cnt_p = *error_cnt_p + 1;
3105 }
3106 }
3107 if (n->object() != NULL) {
3108 if (jt != NULL) {
3109 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3110 ": free per-thread monitor must have NULL _object "
3111 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
3112 p2i(n->object()));
3113 } else {
3114 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3115 "must have NULL _object field: _object=" INTPTR_FORMAT,
3116 p2i(n), p2i(n->object()));
3117 }
3118 *error_cnt_p = *error_cnt_p + 1;
3119 }
3120 }
3121
3122 // Lock the next ObjectMonitor for traversal and unlock the current
3123 // ObjectMonitor. Returns the next ObjectMonitor if there is one.
3124 // Otherwise returns NULL (after unlocking the current ObjectMonitor).
3125 // This function is used by the various list walker functions to
3126 // safely walk a list without allowing an ObjectMonitor to be moved
3127 // to a different list while we are walking the list.
3128 static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
3129 assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
3130 ObjectMonitor* next = unmarked_next(cur);
3131 if (next == NULL) { // Reached the end of the list.
3132 om_unlock(cur);
3133 return NULL;
3134 }
3135 om_lock(next); // Lock next before unlocking current to keep
3136 om_unlock(cur); // from being by-passed by another thread.
3137 return next;
3138 }
3139
3140 // Check the global free list and count; log the results of the checks.
3141 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
3142 int *error_cnt_p) {
3143 int chk_om_free_count = 0;
3144 ObjectMonitor* cur = NULL;
3145 if ((cur = get_list_head_locked(&om_list_globals._free_list)) != NULL) {
3146 // Marked the global free list head so process the list.
3147 while (true) {
3148 chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3149 chk_om_free_count++;
3150
3151 cur = lock_next_for_traversal(cur);
3152 if (cur == NULL) {
3153 break;
3154 }
3155 }
3156 }
3157 int l_free_count = Atomic::load(&om_list_globals._free_count);
3158 if (l_free_count == chk_om_free_count) {
3159 out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
3160 l_free_count, chk_om_free_count);
3161 } else {
3162 // With fine-grained locks on om_list_globals._free_list, it
3163 // is possible for an ObjectMonitor to be prepended to
3164 // om_list_globals._free_list after we started calculating
3165 // chk_om_free_count, so om_list_globals._free_count may not
3166 // match anymore.
3167 out->print_cr("WARNING: global_free_count=%d is not equal to "
3168 "chk_om_free_count=%d", l_free_count, chk_om_free_count);
3169 }
3170 }
3171
3172 // Check the global wait list and count; log the results of the checks.
3173 void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
3174 int *error_cnt_p) {
3175 int chk_om_wait_count = 0;
3176 ObjectMonitor* cur = NULL;
3177 if ((cur = get_list_head_locked(&om_list_globals._wait_list)) != NULL) {
3178 // Marked the global wait list head so process the list.
3179 while (true) {
3180 // Rules for om_list_globals._wait_list are the same as for
3181 // om_list_globals._free_list:
3182 chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3183 chk_om_wait_count++;
3184
3185 cur = lock_next_for_traversal(cur);
3186 if (cur == NULL) {
3187 break;
3188 }
3189 }
3190 }
3191 if (Atomic::load(&om_list_globals._wait_count) == chk_om_wait_count) {
3192 out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
3193 Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
3194 } else {
3195 out->print_cr("ERROR: global_wait_count=%d is not equal to "
3196 "chk_om_wait_count=%d",
3197 Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
3198 *error_cnt_p = *error_cnt_p + 1;
3199 }
3200 }
3201
3202 // Check the global in-use list and count; log the results of the checks.
3203 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
3204 int *error_cnt_p) {
3205 int chk_om_in_use_count = 0;
3206 ObjectMonitor* cur = NULL;
3207 if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
3208 // Marked the global in-use list head so process the list.
3209 while (true) {
3210 chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
3211 chk_om_in_use_count++;
3212
3213 cur = lock_next_for_traversal(cur);
3214 if (cur == NULL) {
3215 break;
3216 }
3217 }
3218 }
3219 int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
3220 if (l_in_use_count == chk_om_in_use_count) {
3221 out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
3340 if (l_om_in_use_count == chk_om_in_use_count) {
3341 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
3342 "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
3343 chk_om_in_use_count);
3344 } else {
3345 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
3346 "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
3347 chk_om_in_use_count);
3348 *error_cnt_p = *error_cnt_p + 1;
3349 }
3350 }
3351
3352 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
3353 // flags indicate why the entry is in-use, 'object' and 'object type'
3354 // indicate the associated object and its type.
3355 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
3356 stringStream ss;
3357 if (Atomic::load(&om_list_globals._in_use_count) > 0) {
3358 out->print_cr("In-use global monitor info:");
3359 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3360 out->print_cr("%18s %s %7s %18s %18s",
3361 "monitor", "BHL", "ref_cnt", "object", "object type");
3362 out->print_cr("================== === ======= ================== ==================");
3363 ObjectMonitor* cur = NULL;
3364 if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
3365 // Marked the global in-use list head so process the list.
3366 while (true) {
3367 const oop obj = (oop) cur->object();
3368 const markWord mark = cur->header();
3369 ResourceMark rm;
3370 out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s", p2i(cur),
3371 cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
3372 (int)cur->ref_count(), p2i(obj), obj->klass()->external_name());
3373 if (cur->is_busy() != 0) {
3374 out->print(" (%s)", cur->is_busy_to_string(&ss));
3375 ss.reset();
3376 }
3377 out->cr();
3378
3379 cur = lock_next_for_traversal(cur);
3380 if (cur == NULL) {
3381 break;
3382 }
3383 }
3384 }
3385 }
3386
3387 out->print_cr("In-use per-thread monitor info:");
3388 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3389 out->print_cr("%18s %18s %s %7s %18s %18s",
3390 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
3391 out->print_cr("================== ================== === ======= ================== ==================");
3392 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3393 ObjectMonitor* cur = NULL;
3394 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
3395 // Marked the per-thread in-use list head so process the list.
3396 while (true) {
3397 const oop obj = (oop) cur->object();
3398 const markWord mark = cur->header();
3399 ResourceMark rm;
3400 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT
3401 " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
3402 mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
3403 p2i(obj), obj->klass()->external_name());
3404 if (cur->is_busy() != 0) {
3405 out->print(" (%s)", cur->is_busy_to_string(&ss));
3406 ss.reset();
3407 }
3408 out->cr();
3409
3410 cur = lock_next_for_traversal(cur);
3411 if (cur == NULL) {
3412 break;
3413 }
3414 }
3415 }
3416 }
3417
3418 out->flush();
3419 }
3420
3421 // Log counts for the global and per-thread monitor lists and return
3422 // the population count.
3423 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3424 int pop_count = 0;
3425 out->print_cr("%18s %10s %10s %10s %10s",
3426 "Global Lists:", "InUse", "Free", "Wait", "Total");
3427 out->print_cr("================== ========== ========== ========== ==========");
3428 int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
3429 int l_free_count = Atomic::load(&om_list_globals._free_count);
3430 int l_wait_count = Atomic::load(&om_list_globals._wait_count);
3431 out->print_cr("%18s %10d %10d %10d %10d", "", l_in_use_count,
3432 l_free_count, l_wait_count,
3433 Atomic::load(&om_list_globals._population));
3434 pop_count += l_in_use_count + l_free_count;
3435 if (HandshakeAfterDeflateIdleMonitors) {
3436 pop_count += l_wait_count;
3437 }
3438
3439 out->print_cr("%18s %10s %10s %10s",
3440 "Per-Thread Lists:", "InUse", "Free", "Provision");
3441 out->print_cr("================== ========== ========== ==========");
3442
3443 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3444 int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
3445 int l_om_free_count = Atomic::load(&jt->om_free_count);
3446 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
3447 l_om_in_use_count, l_om_free_count, jt->om_free_provision);
3448 pop_count += l_om_in_use_count + l_om_free_count;
3449 }
3450 return pop_count;
3451 }
3452
3453 #ifndef PRODUCT
3454
3455 // Check if monitor belongs to the monitor cache
3456 // The list is grow-only so it's *relatively* safe to traverse
3457 // the list of extant blocks without taking a lock.