 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------
  } \
}

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround for dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
// Global ObjectMonitor free list. Newly allocated and deflated
// ObjectMonitors are prepended here.
ObjectMonitor* volatile ObjectSynchronizer::g_free_list = NULL;
// Global ObjectMonitor in-use list. When a JavaThread is exiting,
// ObjectMonitors on its per-thread in-use list are prepended here.
ObjectMonitor* volatile ObjectSynchronizer::g_om_in_use_list = NULL;
int ObjectSynchronizer::g_om_in_use_count = 0;  // # on g_om_in_use_list

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int g_om_free_count = 0;     // # on g_free_list
static volatile int g_om_population = 0;     // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form. (A commented sketch of
// this contract follows the quick_* functions below.)
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}

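// A minimal sketch (hypothetical caller, not actual HotSpot code) of the
// quick-path/slow-path contract described above: the caller performs the
// necessary state transitions and takes the slow path only when the quick
// form returns false.
//
//   if (!ObjectSynchronizer::quick_enter(obj, self, lock)) {
//     // transition thread state as required, create a Handle, then ...
//     ObjectSynchronizer::enter(Handle(self, obj), lock, self);
//   }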

// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    assert(m->object() == obj, "invariant");
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(&(m->_owner), self)) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  monitor->reenter(recursions, THREAD);
}
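
// A commented sketch (illustrative only; lock1/lock2 are hypothetical
// Handles already held in that order, rec is the saved recursion count)
// of the nested-lock protocol described above complete_exit():
//
//   intx rec = ObjectSynchronizer::complete_exit(lock1, THREAD); // 1) give up lock1
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                  // 2) wait on lock2
//   ...                                                          // 3) notified; unlock lock2
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);             // 4) restore lock1
//   ...                                                          // 5) lock lock2 again
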
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}
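
// For reference, native code reaches jni_enter()/jni_exit() through the
// standard JNI monitor API (sketch; error handling omitted):
//
//   if (env->MonitorEnter(obj) == JNI_OK) {
//     ... critical section ...
//     env->MonitorExit(obj);
//   }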

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}
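
// Typical (illustrative) use of the RAII helper above from VM code:
//
//   {
//     ObjectLocker ol(h_obj, THREAD);   // constructor enters the monitor
//     ... operate on the locked object ...
//   }                                   // destructor exits the monitor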


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    // Object is stack-locked by the current thread, so it is not
    // inflated and there can't be any waiters to notify.
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    // Object is stack-locked by the current thread, so it is not
    // inflated and there can't be any waiters to notify.
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int _forceMonitorScavenge = 0; // Scavenge required and pending

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markWord temp, test;
  intptr_t hash;
  markWord mark = read_stable_mark(obj);

  // object should remain ineligible for biased locking
  assert(!mark.has_bias_pattern(), "invariant");

  if (mark.is_neutral()) {            // if this is a normal header
    hash = mark.hash();
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(self, obj);  // get a new hash
    temp = mark.copy_set_hash(hash);  // merge the hash into header
                                      // try to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {               // if the hash was installed, return it
      return hash;
    }
    // Failed to install the hash. It could be that another thread
    // installed the hash just before our attempt or inflation has
    // occurred or... so we fall thru to inflate the monitor for
    // stability and then install the hash.
  } else if (mark.has_monitor()) {
    monitor = mark.monitor();
    temp = monitor->header();
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    // Fall thru so we only have one place that installs the hash in
    // the ObjectMonitor.
  } else if (self->is_lock_owned((address)mark.locker())) {
    // This is a stack lock owned by the calling thread so fetch the
    // displaced markWord from the BasicLock on the stack.
    temp = mark.displaced_mark_helper();
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    // WARNING:
    // The displaced header in the BasicLock on a thread's stack
    // is strictly immutable. It CANNOT be changed in ANY cases.
    // So we have to inflate the stack lock into an ObjectMonitor
    // even if the current thread owns the lock. The BasicLock on
    // a thread's stack can be asynchronously read by other threads
    // during an inflate() call so any change to that stack memory
    // may not propagate to other threads correctly.
  }

  // Inflate the monitor to set the hash.
  monitor = inflate(self, obj, inflate_cause_hash_code);
  // Load ObjectMonitor's header/dmw field and see if it has a hash.
  mark = monitor->header();
  assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  hash = mark.hash();
  if (hash == 0) {                    // if it does not have a hash
    hash = get_next_hash(self, obj);  // get a new hash
    temp = mark.copy_set_hash(hash);  // merge the hash into header
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
    test = markWord(v);
    if (test != mark) {
      // The attempt to update the ObjectMonitor's header/dmw field
      // did not work. This can happen if another thread managed to
      // merge in the hash just before our cmpxchg().
      // If we add any new usages of the header/dmw field, this code
      // will need to be updated.
      hash = test.hash();
      assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
      assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
    }
  }
  // We finally get the hash.
  return hash;
}
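
// A worked sketch of the neutral-header path in FastHashCode() above,
// with illustrative values:
//   mark = neutral header, hash bits all zero
//   hash = get_next_hash(...)            e.g. 0x2f9ee097
//   temp = mark.copy_set_hash(hash)      the same header with hash bits filled in
//   obj->cas_set_mark(temp, mark)        publishes the hash iff the mark is
//                                        unchanged; otherwise we fall through
//                                        and install the hash in the inflated
//                                        monitor's header instead.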

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    return thread->is_lock_owned((address)mark.locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark.has_monitor()) {
    void* owner = mark.monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        // Only process with closure if the object is set.
        closure->do_monitor(mid);
      }
    }
    block = (PaddedObjectMonitor*)block->_next_om;
  }
}

static bool monitors_used_above_threshold() {
  if (g_om_population == 0) {
    return false;
  }
  int monitors_used = g_om_population - g_om_free_count;
  int monitor_usage = (monitors_used * 100LL) / g_om_population;
  return monitor_usage > MonitorUsedDeflationThreshold;
}
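
// Worked example for the threshold check above, assuming
// MonitorUsedDeflationThreshold=90 (illustrative values):
//   g_om_population=1000, g_om_free_count=50
//   monitors_used = 1000 - 50 = 950
//   monitor_usage = (950 * 100) / 1000 = 95 -> 95 > 90, so cleanup is needed.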

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    if (monitors_used_above_threshold()) {
      return true;
    }
  }
  return needs_monitor_scavenge();
}

bool ObjectSynchronizer::needs_monitor_scavenge() {
  if (Atomic::load(&_forceMonitorScavenge) == 1) {
    log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
    return true;
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(g_om_in_use_list, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->_next_om) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global g_free_list and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the g_free_list.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private om_free_list
// --   assigned to an object. The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// If MonitorBound is set, the boundary applies to
//     (g_om_population - g_om_free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.

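// Worked example (illustrative values): with MonitorBound=10000,
// g_om_population=16384 and g_om_free_count=2048, the in-use estimate is
// 16384 - 2048 = 14336 > 10000, so om_alloc() below defers a request for
// a deflation safepoint via InduceScavenge().
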
static void InduceScavenge(Thread* self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger a cleanup safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
    VMThread::check_for_forced_cleanup();
  }
}

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  stringStream ss;
  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = self->om_free_list;
    if (m != NULL) {
      self->om_free_list = m->_next_om;
      self->om_free_count--;
      guarantee(m->object() == NULL, "invariant");
      m->_next_om = self->om_in_use_list;
      self->om_in_use_list = m;
      self->om_in_use_count++;
      return m;
    }

    // 2: try to allocate from the global g_free_list
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (g_free_list != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "om_alloc(1)");
      for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
        g_om_free_count--;
        ObjectMonitor* take = g_free_list;
        g_free_list = take->_next_om;
        guarantee(take->object() == NULL, "invariant");
        take->Recycle();
        om_release(self, take, false);
      }
      Thread::muxRelease(&gListLock);
      self->om_free_provision += 1 + (self->om_free_provision/2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (g_om_population-g_om_free_count) > mx) {
        // Not enough ObjectMonitors on the global free list.
        // We can't safely induce a STW safepoint from om_alloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(self, "om_alloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
    PaddedObjectMonitor* temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // initialize the linked list, each monitor points to its next
    // forming the singly linked free list; the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as g_block_list
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1]._next_om = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate g_block_list and g_free_list.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "om_alloc(2)");
    g_om_population += _BLOCKSIZE-1;
    g_om_free_count += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (g_block_list).
    // The very first ObjectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0]._next_om = g_block_list;
    // There are lock-free uses of g_block_list so make sure that
    // the previous stores happen before we update g_block_list.
    Atomic::release_store(&g_block_list, temp);

    // Add the new string of ObjectMonitors to the global free list
    temp[_BLOCKSIZE - 1]._next_om = g_free_list;
    g_free_list = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}

// Place "m" on the caller's private per-thread om_free_list.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's om_free_list as the only non-allocation time
// we'll call om_release() is to return a monitor to the free list after
// a CAS attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() -- from reclaiming them while we
// are trying to release them.

void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                    bool from_per_thread_alloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  stringStream ss;
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
            "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
            m->_recursions);
  // _next_om is used for both per-thread in-use and free lists so
  // we have to remove 'm' from the in-use list first (as needed).
  if (from_per_thread_alloc) {
    // Need to remove 'm' from om_in_use_list.
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == self->om_in_use_list) {
          self->om_in_use_list = mid->_next_om;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
        }
        extracted = true;
        self->om_in_use_count--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  m->_next_om = self->om_free_list;
  self->om_free_list = m;
  self->om_free_count++;
}

// Return ObjectMonitors on a moribund thread's free and in-use
// lists to the appropriate global lists. The ObjectMonitors on the
// per-thread in-use list may still be in use by other threads.
//
// We currently call om_flush() from Threads::remove() before the
// thread has been excised from the thread list and is no longer a
// mutator. This means that om_flush() cannot run concurrently with
// a safepoint and interleave with deflate_idle_monitors(). In
// particular, this ensures that the thread's in-use monitors are
// scanned by a GC safepoint, either via Thread::oops_do() (before
// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
// om_flush() is called).

void ObjectSynchronizer::om_flush(Thread* self) {
  ObjectMonitor* free_list = self->om_free_list;
  ObjectMonitor* free_tail = NULL;
  int free_count = 0;
  if (free_list != NULL) {
    ObjectMonitor* s;
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to g_free_list below under the gListLock.
    stringStream ss;
    for (s = free_list; s != NULL; s = s->_next_om) {
      free_count++;
      free_tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(free_tail != NULL, "invariant");
    assert(self->om_free_count == free_count, "free-count off");
    self->om_free_list = NULL;
    self->om_free_count = 0;
  }

  ObjectMonitor* in_use_list = self->om_in_use_list;
  ObjectMonitor* in_use_tail = NULL;
  int in_use_count = 0;
  if (in_use_list != NULL) {
    // The thread is going away, however the ObjectMonitors on the
    // om_in_use_list may still be in-use by other threads. Link
    // them to in_use_tail, which will be linked into the global
    // in-use list g_om_in_use_list below, under the gListLock.
    ObjectMonitor *cur_om;
    for (cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
      in_use_tail = cur_om;
      in_use_count++;
    }
    guarantee(in_use_tail != NULL, "invariant");
    assert(self->om_in_use_count == in_use_count, "in-use count off");
    self->om_in_use_list = NULL;
    self->om_in_use_count = 0;
  }

  Thread::muxAcquire(&gListLock, "om_flush");
  if (free_tail != NULL) {
    free_tail->_next_om = g_free_list;
    g_free_list = free_list;
    g_om_free_count += free_count;
  }

  if (in_use_tail != NULL) {
    in_use_tail->_next_om = g_om_in_use_list;
    g_om_in_use_list = in_use_list;
    g_om_in_use_count += in_use_count;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
                 ", in_use_count=%d" ", om_free_provision=%d",
                 p2i(self), free_count, in_use_count, self->om_free_provision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark();
  if (mark.has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
    assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
    return;
  }
  inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
                                           oop object,
                                           const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markWord::INFLATING()) {
      read_stable_mark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATED appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the om_alloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in om_alloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
      ObjectMonitor* m = om_alloc(self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        om_release(self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack BasicLock back into the object header. Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an ObjectMonitor. The inflate() routine must copy the header
      // value from the BasicLock on the owner's stack to the ObjectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the BasicLock to the object)
      // while inflation is in-progress. This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
      // with this thread we could simply set m->_owner = self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark.locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      object->release_set_mark(markWord::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to self.
    // An inflateTry() method that we could call from enter() would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = om_alloc(self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_object(object);
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
      om_release(self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}


// We maintain a list of in-use monitors for each thread.
//
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see om_flush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in-use could negatively impact performance. We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** free_head_p,
                                         ObjectMonitor** free_tail_p) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markWord mark = obj->mark();
  guarantee(mark == markWord::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
            markWord::encode(mid).value());
  // Make sure that mark.monitor() and markWord::encode() agree:
  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  const markWord dmw = mid->header();
  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

  if (mid->is_busy()) {
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  mark.value(), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));

    // Move the deflated ObjectMonitor to the working free list
    // defined by free_head_p and free_tail_p.
    if (*free_head_p == NULL) *free_head_p = mid;
    if (*free_tail_p != NULL) {
      // We append to the list so the caller can use mid->_next_om
      // to fix the linkages in its context.
      ObjectMonitor* prevtail = *free_tail_p;
      // Should have been cleaned up by the caller:
      assert(prevtail->_next_om == NULL, "cleaned up deflated?");
      prevtail->_next_om = mid;
    }
    *free_tail_p = mid;
    // At this point, mid->_next_om still refers to its current
    // value and another ObjectMonitor's _next_om field still
    // refers to this ObjectMonitor. Those linkages have to be
    // cleaned up by the caller who has the complete context.
    deflated = true;
  }
  return deflated;
}
1559
1560 // Walk a given monitor list, and deflate idle monitors
1561 // The given list could be a per-thread list or a global list
1562 // Caller acquires gListLock as needed.
1563 //
1564 // In the case of parallel processing of thread local monitor lists,
1565 // work is done by Threads::parallel_threads_do() which ensures that
1566 // each Java thread is processed by exactly one worker thread, and
1567 // thus avoid conflicts that would arise when worker threads would
1568 // process the same monitor lists concurrently.
1569 //
1570 // See also ParallelSPCleanupTask and
1571 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
1572 // Threads::parallel_java_threads_do() in thread.cpp.
1573 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
1574 ObjectMonitor** free_head_p,
1575 ObjectMonitor** free_tail_p) {
1576 ObjectMonitor* mid;
1577 ObjectMonitor* next;
1578 ObjectMonitor* cur_mid_in_use = NULL;
1579 int deflated_count = 0;
1580
1581 for (mid = *list_p; mid != NULL;) {
1582 oop obj = (oop) mid->object();
1583 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
1584 // Deflation succeeded and already updated free_head_p and
1585 // free_tail_p as needed. Finish the move to the local free list
1586 // by unlinking mid from the global or per-thread in-use list.
1587 if (mid == *list_p) {
1588 *list_p = mid->_next_om;
1589 } else if (cur_mid_in_use != NULL) {
1590         cur_mid_in_use->_next_om = mid->_next_om; // maintain the current in-use list
1591 }
1592 next = mid->_next_om;
1593       mid->_next_om = NULL; // This mid is now the tail of the free_head_p list
1594 mid = next;
1595 deflated_count++;
1596 } else {
1597 cur_mid_in_use = mid;
1598 mid = mid->_next_om;
1599 }
1600 }
1601 return deflated_count;
1602 }
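// An editorial sketch of the unlink cases above, for an in-use list
// A -> B -> C (each deflated node was already appended to the working
// free list by deflate_monitor(); this loop just fixes the linkages):
//   - deflating the head A (mid == *list_p):
//       *list_p = B;             A->_next_om = NULL;
//   - deflating an interior B (cur_mid_in_use == A):
//       A->_next_om = C;         B->_next_om = NULL;
// The working free list built via free_head_p/free_tail_p is later
// spliced onto a real free list in constant time.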
1603
1604 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1605 counters->n_in_use = 0; // currently associated with objects
1606 counters->n_in_circulation = 0; // extant
1607 counters->n_scavenged = 0; // reclaimed (global and per-thread)
1608 counters->per_thread_scavenged = 0; // per-thread scavenge total
1609 counters->per_thread_times = 0.0; // per-thread scavenge times
1610 }
1611
1612 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
1613 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1614 bool deflated = false;
1615
1616 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
1617 ObjectMonitor* free_tail_p = NULL;
1618 elapsedTimer timer;
1619
1620 if (log_is_enabled(Info, monitorinflation)) {
1621 timer.start();
1622 }
1623
1624   // Prevent om_flush from changing mids in Thread dtors during deflation,
1625   // and in case the VM thread is acquiring a lock during a safepoint.
1626   // See e.g. 6320749.
1627 Thread::muxAcquire(&gListLock, "deflate_idle_monitors");
1628
1629   // Note: the thread-local monitor lists get deflated in
1630 // a separate pass. See deflate_thread_local_monitors().
1631
1632 // For moribund threads, scan g_om_in_use_list
1633 int deflated_count = 0;
1634 if (g_om_in_use_list) {
1635 counters->n_in_circulation += g_om_in_use_count;
1636 deflated_count = deflate_monitor_list((ObjectMonitor **)&g_om_in_use_list, &free_head_p, &free_tail_p);
1637 g_om_in_use_count -= deflated_count;
1638 counters->n_scavenged += deflated_count;
1639 counters->n_in_use += g_om_in_use_count;
1640 }
1641
1642 if (free_head_p != NULL) {
1643 // Move the deflated ObjectMonitors back to the global free list.
1644 guarantee(free_tail_p != NULL && counters->n_scavenged > 0, "invariant");
1645 assert(free_tail_p->_next_om == NULL, "invariant");
1646 // constant-time list splice - prepend scavenged segment to g_free_list
1647 free_tail_p->_next_om = g_free_list;
1648 g_free_list = free_head_p;
1649 }
1650 Thread::muxRelease(&gListLock);
1651 timer.stop();
1652
1653 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1654 LogStreamHandle(Info, monitorinflation) lsh_info;
1655 LogStream* ls = NULL;
1656 if (log_is_enabled(Debug, monitorinflation)) {
1657 ls = &lsh_debug;
1658 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
1659 ls = &lsh_info;
1660 }
1661 if (ls != NULL) {
1662 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
1663 }
1664 }
1665
1666 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
1667 // Report the cumulative time for deflating each thread's idle
1668 // monitors. Note: if the work is split among more than one
1669 // worker thread, then the reported time will likely be more
1670 // than a beginning-to-end measurement of the phase.
1671 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
1672
1673 g_om_free_count += counters->n_scavenged;
1674
1675 if (log_is_enabled(Debug, monitorinflation)) {
1676 // exit_globals()'s call to audit_and_print_stats() is done
1677 // at the Info level.
1678 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
1679 } else if (log_is_enabled(Info, monitorinflation)) {
1680 Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
1681 log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
1682 "g_om_free_count=%d", g_om_population,
1683 g_om_in_use_count, g_om_free_count);
1684 Thread::muxRelease(&gListLock);
1685 }
1686
1687 Atomic::store(&_forceMonitorScavenge, 0); // Reset
1688
1689 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
1690 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
1691
1692 GVars.stw_random = os::random();
1693 GVars.stw_cycle++;
1694 }
1695
1696 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
1697 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1698
1699 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
1700 ObjectMonitor* free_tail_p = NULL;
1701 elapsedTimer timer;
1702
1703 if (log_is_enabled(Info, safepoint, cleanup) ||
1704 log_is_enabled(Info, monitorinflation)) {
1705 timer.start();
1706 }
1707
1708 int deflated_count = deflate_monitor_list(thread->om_in_use_list_addr(), &free_head_p, &free_tail_p);
1709
1710 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");
1711
1712 // Adjust counters
1713 counters->n_in_circulation += thread->om_in_use_count;
1714 thread->om_in_use_count -= deflated_count;
1715 counters->n_scavenged += deflated_count;
1716 counters->n_in_use += thread->om_in_use_count;
1717 counters->per_thread_scavenged += deflated_count;
1718
1719 if (free_head_p != NULL) {
1720 // Move the deflated ObjectMonitors back to the global free list.
1721 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
1722 assert(free_tail_p->_next_om == NULL, "invariant");
1723
1724 // constant-time list splice - prepend scavenged segment to g_free_list
1725 free_tail_p->_next_om = g_free_list;
1726 g_free_list = free_head_p;
1727 }
1728
1729 timer.stop();
1730 // Safepoint logging cares about cumulative per_thread_times and
1731   // we'll capture most of the cost, but not the muxRelease(), which
1732 // should be cheap.
1733 counters->per_thread_times += timer.seconds();
1734
1735 Thread::muxRelease(&gListLock);
1736
1737 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1738 LogStreamHandle(Info, monitorinflation) lsh_info;
1739 LogStream* ls = NULL;
1740 if (log_is_enabled(Debug, monitorinflation)) {
1741 ls = &lsh_debug;
1742 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
1743 ls = &lsh_info;
1744 }
1745 if (ls != NULL) {
1746 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
1747 }
1748 }
1749
1750 // Monitor cleanup on JavaThread::exit
1751
1752 // Iterate through monitor cache and attempt to release thread's monitors
1753 // Gives up on a particular monitor if an exception occurs, but continues
1754 // the overall iteration, swallowing the exception.
1755 class ReleaseJavaMonitorsClosure: public MonitorClosure {
1756 private:
1767
1768 // Release all inflated monitors owned by THREAD. Lightweight monitors are
1769 // ignored. This is meant to be called during JNI thread detach which assumes
1770 // all remaining monitors are heavyweight. All exceptions are swallowed.
1771 // Scanning the extant monitor list can be time-consuming.
1772 // A simple optimization is to add a per-thread flag that indicates a thread
1773 // called jni_monitorenter() during its lifetime.
1774 //
1775 // Instead of NoSafepointVerifier it might be cheaper to
1776 // use an idiom of the form:
1777 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
1778 // <code that must not run at safepoint>
1779 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
1780 // Since the tests are extremely cheap we could leave them enabled
1781 // for normal product builds.
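// (Editorial note on why the guarantee above holds: HotSpot's safepoint
// counter is even whenever no safepoint is in progress and changes as a
// safepoint begins and ends, so an unchanged, even value proves that the
// guarded code ran entirely outside a safepoint.)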
1782
1783 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
1784 assert(THREAD == JavaThread::current(), "must be current Java thread");
1785 NoSafepointVerifier nsv;
1786 ReleaseJavaMonitorsClosure rjmc(THREAD);
1787 Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
1788 ObjectSynchronizer::monitors_iterate(&rjmc);
1789 Thread::muxRelease(&gListLock);
1790 THREAD->clear_pending_exception();
1791 }
1792
1793 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
1794 switch (cause) {
1795 case inflate_cause_vm_internal: return "VM Internal";
1796 case inflate_cause_monitor_enter: return "Monitor Enter";
1797 case inflate_cause_wait: return "Monitor Wait";
1798 case inflate_cause_notify: return "Monitor Notify";
1799 case inflate_cause_hash_code: return "Monitor Hash Code";
1800 case inflate_cause_jni_enter: return "JNI Monitor Enter";
1801 case inflate_cause_jni_exit: return "JNI Monitor Exit";
1802 default:
1803 ShouldNotReachHere();
1804 }
1805 return "Unknown";
1806 }
1807
1808 //------------------------------------------------------------------------------
1809 // Debugging code
1823 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
1824 return (u_char*)&GVars.stw_random;
1825 }
1826
1827 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
1828 assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
1829
1830 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1831 LogStreamHandle(Info, monitorinflation) lsh_info;
1832 LogStreamHandle(Trace, monitorinflation) lsh_trace;
1833 LogStream* ls = NULL;
1834 if (log_is_enabled(Trace, monitorinflation)) {
1835 ls = &lsh_trace;
1836 } else if (log_is_enabled(Debug, monitorinflation)) {
1837 ls = &lsh_debug;
1838 } else if (log_is_enabled(Info, monitorinflation)) {
1839 ls = &lsh_info;
1840 }
1841 assert(ls != NULL, "sanity check");
1842
1843 if (!on_exit) {
1844 // Not at VM exit so grab the global list lock.
1845 Thread::muxAcquire(&gListLock, "audit_and_print_stats");
1846 }
1847
1848 // Log counts for the global and per-thread monitor lists:
1849 int chk_om_population = log_monitor_list_counts(ls);
1850 int error_cnt = 0;
1851
1852 ls->print_cr("Checking global lists:");
1853
1854 // Check g_om_population:
1855 if (g_om_population == chk_om_population) {
1856 ls->print_cr("g_om_population=%d equals chk_om_population=%d",
1857 g_om_population, chk_om_population);
1858 } else {
1859 ls->print_cr("ERROR: g_om_population=%d is not equal to "
1860 "chk_om_population=%d", g_om_population,
1861 chk_om_population);
1862 error_cnt++;
1863 }
1864
1865 // Check g_om_in_use_list and g_om_in_use_count:
1866 chk_global_in_use_list_and_count(ls, &error_cnt);
1867
1868 // Check g_free_list and g_om_free_count:
1869 chk_global_free_list_and_count(ls, &error_cnt);
1870
1871 if (!on_exit) {
1872 Thread::muxRelease(&gListLock);
1873 }
1874
1875 ls->print_cr("Checking per-thread lists:");
1876
1877 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
1878 // Check om_in_use_list and om_in_use_count:
1879 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
1880
1881 // Check om_free_list and om_free_count:
1882 chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
1883 }
1884
1885 if (error_cnt == 0) {
1886 ls->print_cr("No errors found in monitor list checks.");
1887 } else {
1888 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
1889 }
1890
1891 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
1892 (!on_exit && log_is_enabled(Trace, monitorinflation))) {
1893     // When exiting, this log output is at the Info level. When called
1894 // at a safepoint, this log output is at the Trace level since
1895 // there can be a lot of it.
1896 log_in_use_monitor_details(ls, on_exit);
1897 }
1898
1899 ls->flush();
1900
1901 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
1902 }
1903
1904 // Check a free monitor entry; log any errors.
1905 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
1906 outputStream * out, int *error_cnt_p) {
1907 stringStream ss;
1908 if (n->is_busy()) {
1909 if (jt != NULL) {
1910 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1911 ": free per-thread monitor must not be busy: %s", p2i(jt),
1912 p2i(n), n->is_busy_to_string(&ss));
1913 } else {
1914 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
1915 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
1916 }
1917 *error_cnt_p = *error_cnt_p + 1;
1918 }
1919 if (n->header().value() != 0) {
1920 if (jt != NULL) {
1921 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1922 ": free per-thread monitor must have NULL _header "
1923 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
1924 n->header().value());
1925 } else {
1926 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
1927 "must have NULL _header field: _header=" INTPTR_FORMAT,
1928 p2i(n), n->header().value());
1929 }
1930 *error_cnt_p = *error_cnt_p + 1;
1931 }
1932 if (n->object() != NULL) {
1933 if (jt != NULL) {
1934 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1935 ": free per-thread monitor must have NULL _object "
1936 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
1937 p2i(n->object()));
1938 } else {
1939 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
1940 "must have NULL _object field: _object=" INTPTR_FORMAT,
1941 p2i(n), p2i(n->object()));
1942 }
1943 *error_cnt_p = *error_cnt_p + 1;
1944 }
1945 }
1946
1947 // Check the global free list and count; log the results of the checks.
1948 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
1949 int *error_cnt_p) {
1950 int chk_om_free_count = 0;
1951 for (ObjectMonitor* n = g_free_list; n != NULL; n = n->_next_om) {
1952 chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
1953 chk_om_free_count++;
1954 }
1955 if (g_om_free_count == chk_om_free_count) {
1956 out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d",
1957 g_om_free_count, chk_om_free_count);
1958 } else {
1959 out->print_cr("ERROR: g_om_free_count=%d is not equal to "
1960 "chk_om_free_count=%d", g_om_free_count,
1961 chk_om_free_count);
1962 *error_cnt_p = *error_cnt_p + 1;
1963 }
1964 }
1965
1966 // Check the global in-use list and count; log the results of the checks.
1967 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
1968 int *error_cnt_p) {
1969 int chk_om_in_use_count = 0;
1970 for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
1971 chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
1972 chk_om_in_use_count++;
1973 }
1974 if (g_om_in_use_count == chk_om_in_use_count) {
1975 out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", g_om_in_use_count,
1976 chk_om_in_use_count);
1977 } else {
1978 out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d",
1979 g_om_in_use_count, chk_om_in_use_count);
1980 *error_cnt_p = *error_cnt_p + 1;
1981 }
1982 }
1983
1984 // Check an in-use monitor entry; log any errors.
1985 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
1986 outputStream * out, int *error_cnt_p) {
1987 if (n->header().value() == 0) {
1988 if (jt != NULL) {
1989 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
1990 ": in-use per-thread monitor must have non-NULL _header "
1991 "field.", p2i(jt), p2i(n));
1992 } else {
1993 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
1994 "must have non-NULL _header field.", p2i(n));
1995 }
1996 *error_cnt_p = *error_cnt_p + 1;
1997 }
1998 if (n->object() == NULL) {
1999 if (jt != NULL) {
2000 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2028 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
2029 ": in-use per-thread monitor's object does not refer "
2030 "to the same monitor: obj=" INTPTR_FORMAT ", mark="
2031 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
2032 p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2033 } else {
2034 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
2035 "monitor's object does not refer to the same monitor: obj="
2036 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
2037 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
2038 }
2039 *error_cnt_p = *error_cnt_p + 1;
2040 }
2041 }
2042
2043 // Check the thread's free list and count; log the results of the checks.
2044 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
2045 outputStream * out,
2046 int *error_cnt_p) {
2047 int chk_om_free_count = 0;
2048 for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) {
2049 chk_free_entry(jt, n, out, error_cnt_p);
2050 chk_om_free_count++;
2051 }
2052 if (jt->om_free_count == chk_om_free_count) {
2053 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
2054 "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count);
2055 } else {
2056 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
2057 "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
2058 chk_om_free_count);
2059 *error_cnt_p = *error_cnt_p + 1;
2060 }
2061 }
2062
2063 // Check the thread's in-use list and count; log the results of the checks.
2064 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
2065 outputStream * out,
2066 int *error_cnt_p) {
2067 int chk_om_in_use_count = 0;
2068 for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
2069 chk_in_use_entry(jt, n, out, error_cnt_p);
2070 chk_om_in_use_count++;
2071 }
2072 if (jt->om_in_use_count == chk_om_in_use_count) {
2073 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
2074 "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
2075 chk_om_in_use_count);
2076 } else {
2077 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
2078 "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
2079 chk_om_in_use_count);
2080 *error_cnt_p = *error_cnt_p + 1;
2081 }
2082 }
2083
2084 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
2085 // flags indicate why the entry is in-use, 'object' and 'object type'
2086 // indicate the associated object and its type.
2087 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
2088 bool on_exit) {
2089 if (!on_exit) {
2090 // Not at VM exit so grab the global list lock.
2091 Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
2092 }
2093
2094 stringStream ss;
2095 if (g_om_in_use_count > 0) {
2096 out->print_cr("In-use global monitor info:");
2097 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2098 out->print_cr("%18s %s %18s %18s",
2099 "monitor", "BHL", "object", "object type");
2100 out->print_cr("================== === ================== ==================");
2101 for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
2102 const oop obj = (oop) n->object();
2103 const markWord mark = n->header();
2104 ResourceMark rm;
2105 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n),
2106 n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
2107 p2i(obj), obj->klass()->external_name());
2108 if (n->is_busy() != 0) {
2109 out->print(" (%s)", n->is_busy_to_string(&ss));
2110 ss.reset();
2111 }
2112 out->cr();
2113 }
2114 }
2115
2116 if (!on_exit) {
2117 Thread::muxRelease(&gListLock);
2118 }
2119
2120 out->print_cr("In-use per-thread monitor info:");
2121 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
2122 out->print_cr("%18s %18s %s %18s %18s",
2123 "jt", "monitor", "BHL", "object", "object type");
2124 out->print_cr("================== ================== === ================== ==================");
2125 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2126 for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
2127 const oop obj = (oop) n->object();
2128 const markWord mark = n->header();
2129 ResourceMark rm;
2130 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
2131 " %s", p2i(jt), p2i(n), n->is_busy() != 0,
2132 mark.hash() != 0, n->owner() != NULL, p2i(obj),
2133 obj->klass()->external_name());
2134 if (n->is_busy() != 0) {
2135 out->print(" (%s)", n->is_busy_to_string(&ss));
2136 ss.reset();
2137 }
2138 out->cr();
2139 }
2140 }
2141
2142 out->flush();
2143 }
2144
2145 // Log counts for the global and per-thread monitor lists and return
2146 // the population count.
2147 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
2148 int pop_count = 0;
2149 out->print_cr("%18s %10s %10s %10s",
2150 "Global Lists:", "InUse", "Free", "Total");
2151 out->print_cr("================== ========== ========== ==========");
2152 out->print_cr("%18s %10d %10d %10d", "",
2153 g_om_in_use_count, g_om_free_count, g_om_population);
2154 pop_count += g_om_in_use_count + g_om_free_count;
2155
2156 out->print_cr("%18s %10s %10s %10s",
2157 "Per-Thread Lists:", "InUse", "Free", "Provision");
2158 out->print_cr("================== ========== ========== ==========");
2159
2160 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2161 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
2162 jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
2163 pop_count += jt->om_in_use_count + jt->om_free_count;
2164 }
2165 return pop_count;
2166 }
2167
2168 #ifndef PRODUCT
2169
2170 // Check if monitor belongs to the monitor cache
2171 // The list is grow-only so it's *relatively* safe to traverse
2172 // the list of extant blocks without taking a lock.
2173
2174 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
2175 PaddedObjectMonitor* block = Atomic::load_acquire(&g_block_list);
2176 while (block != NULL) {
2177 assert(block->object() == CHAINMARKER, "must be a block header");
2178 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
2179 address mon = (address)monitor;
2180 address blk = (address)block;
2181 size_t diff = mon - blk;
2182 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
2183 return 1;
2184 }
2185 block = (PaddedObjectMonitor*)block->_next_om;
2186 }
2187 return 0;
2188 }
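// Editorial note: the strict range test above excludes the block header
// at &block[0], and the alignment assert confirms that 'monitor' sits on
// one of the block's usable slots rather than inside padding.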
2189
2190 #endif
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "logging/log.hpp"
28 #include "logging/logStream.hpp"
29 #include "jfr/jfrEvents.hpp"
30 #include "memory/allocation.inline.hpp"
31 #include "memory/metaspaceShared.hpp"
32 #include "memory/padded.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "runtime/atomic.hpp"
38 #include "runtime/biasedLocking.hpp"
39 #include "runtime/handles.inline.hpp"
40 #include "runtime/handshake.hpp"
41 #include "runtime/interfaceSupport.inline.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "runtime/objectMonitor.hpp"
44 #include "runtime/objectMonitor.inline.hpp"
45 #include "runtime/osThread.hpp"
46 #include "runtime/safepointMechanism.inline.hpp"
47 #include "runtime/safepointVerifiers.hpp"
48 #include "runtime/sharedRuntime.hpp"
49 #include "runtime/stubRoutines.hpp"
50 #include "runtime/synchronizer.hpp"
51 #include "runtime/thread.inline.hpp"
52 #include "runtime/timer.hpp"
53 #include "runtime/vframe.hpp"
54 #include "runtime/vmThread.hpp"
55 #include "utilities/align.hpp"
56 #include "utilities/dtrace.hpp"
57 #include "utilities/events.hpp"
58 #include "utilities/preserveException.hpp"
59
60 // The "core" versions of monitor enter and exit reside in this file.
61 // The interpreter and compilers contain specialized transliterated
62 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
63 // for instance. If you make changes here, make sure to modify the
64 // interpreter, and both C1 and C2 fast-path inline locking code emission.
65 //
66 // -----------------------------------------------------------------------------
102 } \
103 }
104
105 #else // ndef DTRACE_ENABLED
106
107 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;}
108 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;}
109
110 #endif // ndef DTRACE_ENABLED
111
112 // This exists only as a workaround of dtrace bug 6254741
113 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
114 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
115 return 0;
116 }
117
118 #define NINFLATIONLOCKS 256
119 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
120
121 // global list of blocks of monitors
122 PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
123 bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
124 bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
125 jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
126
127 struct ListGlobals {
128 char _pad_prefix[OM_CACHE_LINE_SIZE];
129 // These are highly shared list related variables.
130 // To avoid false-sharing they need to be the sole occupants of a cache line.
131
132 // Global ObjectMonitor free list. Newly allocated and deflated
133 // ObjectMonitors are prepended here.
134 ObjectMonitor* free_list;
135 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
136
137 // Global ObjectMonitor in-use list. When a JavaThread is exiting,
138 // ObjectMonitors on its per-thread in-use list are prepended here.
139 ObjectMonitor* in_use_list;
140 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
141
142 // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
143 // is true, deflated ObjectMonitors wait on this list until after a
144 // handshake or a safepoint for platforms that don't support handshakes.
145 // After the handshake or safepoint, the deflated ObjectMonitors are
146 // prepended to free_list.
147 ObjectMonitor* wait_list;
148 DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));
149
150 int free_count; // # on free_list
151 DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));
152
153 int in_use_count; // # on in_use_list
154 DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
155
156 int population; // # Extant -- in circulation
157 DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));
158
159 int wait_count; // # on wait_list
160 DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
161 };
162 static ListGlobals LVars;
163
164 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
165
166
167 // =====================> Spinlock functions
168
169 // ObjectMonitors are not lockable outside of this file. We use spinlocks
170 // implemented using a bit in the _next_om field instead of the heavier-
171 // weight locking mechanisms, for faster list management.
172
173 #define OM_LOCK_BIT 0x1
174
175 // Returns true if the ObjectMonitor is locked.
176 // Otherwise returns false.
177 static bool is_locked(ObjectMonitor* om) {
178 return ((intptr_t)Atomic::load(&om->_next_om) & OM_LOCK_BIT) == OM_LOCK_BIT;
179 }
180
181 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
182 // Note: the om parameter may or may not have been marked originally.
183 static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
184 return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
185 }
186
187 // Try to lock an ObjectMonitor. Returns true if locking was successful.
188 // Otherwise returns false.
189 static bool try_om_lock(ObjectMonitor* om) {
190 // Get current next field without any OM_LOCK_BIT value.
191 ObjectMonitor* next = (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
192 if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) {
193 return false; // Cannot lock the ObjectMonitor.
194 }
195 return true;
196 }
197
198 // Lock an ObjectMonitor.
199 static void om_lock(ObjectMonitor* om) {
200 while (true) {
201 if (try_om_lock(om)) {
202 return;
203 }
204 }
205 }
206
207 // Unlock an ObjectMonitor.
208 static void om_unlock(ObjectMonitor* om) {
209 ObjectMonitor* next = Atomic::load(&om->_next_om);
210 guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
211 " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);
212
213 next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT); // Clear OM_LOCK_BIT.
214 Atomic::store(&om->_next_om, next);
215 }
216
217 // Get the list head after locking it. Returns the list head or NULL
218 // if the list is empty.
219 static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
220 while (true) {
221 ObjectMonitor* mid = Atomic::load(list_p);
222 if (mid == NULL) {
223 return NULL; // The list is empty.
224 }
225 if (try_om_lock(mid)) {
226 if (Atomic::load(list_p) != mid) {
227 // The list head changed so we have to retry.
228 om_unlock(mid);
229 continue;
230 }
231 return mid;
232 }
233 }
234 }
235
236 // Return the unmarked next field in an ObjectMonitor. Note: the next
237 // field may or may not have been marked with OM_LOCK_BIT originally.
238 static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
239 return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
240 }
241
242 #undef OM_LOCK_BIT
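// An editorial usage sketch of the spinlock helpers above (assuming
// ObjectMonitor* values are aligned so bit 0 is otherwise always zero):
//
//   om_lock(m);                             // spin until m's lock is ours
//   ObjectMonitor* succ = unmarked_next(m); // read next without the bit
//   ... update list linkage safely ...
//   om_unlock(m);                           // clear OM_LOCK_BIT
//
// The list management functions below often fold the unlock into a plain
// store of an unmarked next value instead of calling om_unlock().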
243
244
245 // =====================> List Management functions
246
247 // Set the next field in an ObjectMonitor to the specified value.
248 static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
249 Atomic::store(&om->_next_om, value);
250 }
251
252 // Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
253 // the last ObjectMonitor in the list and there are 'count' on the list.
254 // Also updates the specified *count_p.
255 static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
256 int count, ObjectMonitor** list_p,
257 int* count_p) {
258 while (true) {
259 ObjectMonitor* cur = Atomic::load(list_p);
260 // Prepend list to *list_p.
261 if (!try_om_lock(tail)) {
262 continue; // failed to lock tail so try it all again
263 }
264 set_next(tail, cur); // tail now points to cur (and unlocks tail)
265 if (cur == NULL) {
266 // No potential race with takers or other prependers since
267 // *list_p is empty.
268 if (Atomic::cmpxchg(list_p, cur, list) == cur) {
269 // Successfully switched *list_p to the list value.
270 Atomic::add(count_p, count);
271 break;
272 }
273 // Implied else: try it all again
274 } else {
275 if (!try_om_lock(cur)) {
276 continue; // failed to lock cur so try it all again
277 }
278 // We locked cur so try to switch *list_p to the list value.
279 if (Atomic::cmpxchg(list_p, cur, list) != cur) {
280 // The list head has changed so unlock cur and try again:
281 om_unlock(cur);
282 continue;
283 }
284 Atomic::add(count_p, count);
285 om_unlock(cur);
286 break;
287 }
288 }
289 }
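// Editorial note on the locking order above: tail is locked before its
// next field is pointed at the current head, a non-NULL head (cur) is
// locked to hold off concurrent takers (the "guard against A-B-A race"
// idea used in the helpers below), and the cmpxchg still re-checks that
// cur is the head at the moment of the switch.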
290
291 // Prepend a newly allocated block of ObjectMonitors to g_block_list and
292 // LVars.free_list. Also updates LVars.population and LVars.free_count.
293 void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
294 // First we handle g_block_list:
295 while (true) {
296 PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
297 // Prepend new_blk to g_block_list. The first ObjectMonitor in
298 // a block is reserved for use as linkage to the next block.
299 new_blk[0]._next_om = cur;
300 if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
301 // Successfully switched g_block_list to the new_blk value.
302 Atomic::add(&LVars.population, _BLOCKSIZE - 1);
303 break;
304 }
305 // Implied else: try it all again
306 }
307
308 // Second we handle LVars.free_list:
309 prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
310 &LVars.free_list, &LVars.free_count);
311 }
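// An editorial sketch of the block layout handled above:
//
//   new_blk[0]                  reserved header: its _next_om chains the
//                               block on g_block_list (its object field
//                               holds CHAINMARKER)
//   new_blk[1.._BLOCKSIZE-1]    usable ObjectMonitors, prepended to
//                               LVars.free_list as one ready-made segment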
312
313 // Prepend a list of ObjectMonitors to LVars.free_list. 'tail' is the last
314 // ObjectMonitor in the list and there are 'count' on the list. Also
315 // updates LVars.free_count.
316 static void prepend_list_to_global_free_list(ObjectMonitor* list,
317 ObjectMonitor* tail, int count) {
318 prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
319 }
320
321 // Prepend a list of ObjectMonitors to LVars.wait_list. 'tail' is the last
322 // ObjectMonitor in the list and there are 'count' on the list. Also
323 // updates LVars.wait_count.
324 static void prepend_list_to_global_wait_list(ObjectMonitor* list,
325 ObjectMonitor* tail, int count) {
326 assert(HandshakeAfterDeflateIdleMonitors, "sanity check");
327 prepend_list_to_common(list, tail, count, &LVars.wait_list, &LVars.wait_count);
328 }
329
330 // Prepend a list of ObjectMonitors to LVars.in_use_list. 'tail' is the last
331 // ObjectMonitor in the list and there are 'count' on the list. Also
332 // updates LVars.in_use_count.
333 static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
334 ObjectMonitor* tail, int count) {
335 prepend_list_to_common(list, tail, count, &LVars.in_use_list, &LVars.in_use_count);
336 }
337
338 // Prepend an ObjectMonitor to the specified list. Also updates
339 // the specified counter.
340 static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
341 int* count_p) {
342 while (true) {
343 om_lock(m); // Lock m so we can safely update its next field.
344 ObjectMonitor* cur = NULL;
345 // Lock the list head to guard against A-B-A race:
346 if ((cur = get_list_head_locked(list_p)) != NULL) {
347 // List head is now locked so we can safely switch it.
348 set_next(m, cur); // m now points to cur (and unlocks m)
349 Atomic::store(list_p, m); // Switch list head to unlocked m.
350 om_unlock(cur);
351 break;
352 }
353 // The list is empty so try to set the list head.
354 assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
355 set_next(m, cur); // m now points to NULL (and unlocks m)
356 if (Atomic::cmpxchg(list_p, cur, m) == cur) {
357 // List head is now unlocked m.
358 break;
359 }
360 // Implied else: try it all again
361 }
362 Atomic::inc(count_p);
363 }
364
365 // Prepend an ObjectMonitor to a per-thread om_free_list.
366 // Also updates the per-thread om_free_count.
367 static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
368 prepend_to_common(m, &self->om_free_list, &self->om_free_count);
369 }
370
371 // Prepend an ObjectMonitor to a per-thread om_in_use_list.
372 // Also updates the per-thread om_in_use_count.
373 static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
374 prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
375 }
376
377 // Take an ObjectMonitor from the start of the specified list. Also
378 // decrements the specified counter. Returns NULL if none are available.
379 static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
380 int* count_p) {
381 ObjectMonitor* take = NULL;
382 // Lock the list head to guard against A-B-A race:
383 if ((take = get_list_head_locked(list_p)) == NULL) {
384 return NULL; // None are available.
385 }
386 ObjectMonitor* next = unmarked_next(take);
387 // Switch locked list head to next (which unlocks the list head, but
388 // leaves take locked):
389 Atomic::store(list_p, next);
390 Atomic::dec(count_p);
391 // Unlock take, but leave the next value for any lagging list
392 // walkers. It will get cleaned up when take is prepended to
393 // the in-use list:
394 om_unlock(take);
395 return take;
396 }
397
398 // Take an ObjectMonitor from the start of the LVars.free_list. Also
399 // updates LVars.free_count. Returns NULL if none are available.
400 static ObjectMonitor* take_from_start_of_global_free_list() {
401 return take_from_start_of_common(&LVars.free_list, &LVars.free_count);
402 }
403
404 // Take an ObjectMonitor from the start of a per-thread free-list.
405 // Also updates om_free_count. Returns NULL if none are available.
406 static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
407 return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
408 }
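// An editorial sketch (not VM code) of how the take/prepend helpers can
// compose on an allocation path; the real policy lives in om_alloc(),
// which is not part of this excerpt:
//
//   ObjectMonitor* m = take_from_start_of_om_free_list(self);
//   if (m == NULL) {
//     m = take_from_start_of_global_free_list(); // fall back to LVars
//   }
//   if (m != NULL) {
//     prepend_to_om_in_use_list(self, m);        // publish as in-use
//   }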
409
410
411 // =====================> Quick functions
412
413 // The quick_* forms are special fast-path variants used to improve
414 // performance. In the simplest case, a "quick_*" implementation could
415 // simply return false, in which case the caller will perform the necessary
416 // state transitions and call the slow-path form.
417 // The fast-path is designed to handle frequently arising cases in an efficient
418 // manner and is just a degenerate "optimistic" variant of the slow-path.
419 // returns true -- to indicate the call was satisfied.
420 // returns false -- to indicate the call needs the services of the slow-path.
421 // A no-loitering ordinance is in effect for code in the quick_* family
422 // operators: safepoints or indefinite blocking (blocking that might span a
423 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon
424 // entry.
425 //
426 // Consider: An interesting optimization is to have the JIT recognize the
427 // following common idiom:
428 // synchronized (someobj) { .... ; notify(); }
429 // That is, we find a notify() or notifyAll() call that immediately precedes
430 // the monitorexit operation. In that case the JIT could fuse the operations
469 }
470
471 // biased locking and any other IMS exception states take the slow-path
472 return false;
473 }
474
475
476 // The LockNode emitted directly at the synchronization site would have
477 // been too big if it were to have included support for the cases of inflated
478 // recursive enter and exit, so they go here instead.
479 // Note that we can't safely call AsyncPrintJavaStack() from within
480 // quick_enter() as our thread state remains _in_Java.
481
482 bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
483 BasicLock * lock) {
484 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
485 assert(self->is_Java_thread(), "invariant");
486 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
487 NoSafepointVerifier nsv;
488 if (obj == NULL) return false; // Need to throw NPE
489
490 while (true) {
491 const markWord mark = obj->mark();
492
493 if (mark.has_monitor()) {
494 ObjectMonitorHandle omh;
495 if (!omh.save_om_ptr(obj, mark)) {
496 // Lost a race with async deflation so try again.
497 assert(AsyncDeflateIdleMonitors, "sanity check");
498 continue;
499 }
500 ObjectMonitor* const m = omh.om_ptr();
501 assert(m->object() == obj, "invariant");
502 Thread* const owner = (Thread *) m->_owner;
503
504 // Lock contention and Transactional Lock Elision (TLE) diagnostics
505 // and observability
506 // Case: light contention possibly amenable to TLE
507 // Case: TLE inimical operations such as nested/recursive synchronization
508
509 if (owner == self) {
510 m->_recursions++;
511 return true;
512 }
513
514 // This Java Monitor is inflated so obj's header will never be
515 // displaced to this thread's BasicLock. Make the displaced header
516 // non-NULL so this BasicLock is not seen as recursive nor as
517 // being locked. We do this unconditionally so that this thread's
518 // BasicLock cannot be mis-interpreted by any stack walkers. For
519 // performance reasons, stack walkers generally first check for
520 // Biased Locking in the object's header, the second check is for
521 // stack-locking in the object's header, the third check is for
522 // recursive stack-locking in the displaced header in the BasicLock,
523 // and last are the inflated Java Monitor (ObjectMonitor) checks.
524 lock->set_displaced_header(markWord::unused_mark());
525
526 if (owner == NULL && m->try_set_owner_from(self, NULL) == NULL) {
527 assert(m->_recursions == 0, "invariant");
528 return true;
529 }
530
531 if (AsyncDeflateIdleMonitors &&
532 m->try_set_owner_from(self, DEFLATER_MARKER) == DEFLATER_MARKER) {
533 // The deflation protocol finished the first part (setting owner),
534 // but it failed the second part (making ref_count negative) and
535 // bailed. Or the ObjectMonitor was async deflated and reused.
536 // Acquired the monitor.
537 assert(m->_recursions == 0, "invariant");
538 return true;
539 }
540 }
541 break;
542 }
543
544 // Note that we could inflate in quick_enter.
545   // This is likely a useful optimization.
546 // Critically, in quick_enter() we must not:
547 // -- perform bias revocation, or
548 // -- block indefinitely, or
549 // -- reach a safepoint
550
551 return false; // revert to slow-path
552 }
553
554 // -----------------------------------------------------------------------------
555 // Monitor Enter/Exit
556 // The interpreter and compiler assembly code tries to lock using the fast path
557 // of this algorithm. Make sure to update that code if the following function is
558 // changed. The implementation is extremely sensitive to race conditions. Be careful.
559
560 void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
561 if (UseBiasedLocking) {
573 // Anticipate successful CAS -- the ST of the displaced mark must
574 // be visible <= the ST performed by the CAS.
575 lock->set_displaced_header(mark);
576 if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
577 return;
578 }
579 // Fall through to inflate() ...
580 } else if (mark.has_locker() &&
581 THREAD->is_lock_owned((address)mark.locker())) {
582 assert(lock != mark.locker(), "must not re-lock the same lock");
583 assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
584 lock->set_displaced_header(markWord::from_pointer(NULL));
585 return;
586 }
587
588 // The object header will never be displaced to this lock,
589 // so it does not matter what the value is, except that it
590 // must be non-zero to avoid looking like a re-entrant lock,
591 // and must not look locked either.
592 lock->set_displaced_header(markWord::unused_mark());
593 ObjectMonitorHandle omh;
594 inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
595 omh.om_ptr()->enter(THREAD);
596 }
597
598 void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
599 markWord mark = object->mark();
600 // We cannot check for Biased Locking if we are racing an inflation.
601 assert(mark == markWord::INFLATING() ||
602 !mark.has_bias_pattern(), "should not see bias pattern here");
603
604 markWord dhw = lock->displaced_header();
605 if (dhw.value() == 0) {
606 // If the displaced header is NULL, then this exit matches up with
607 // a recursive enter. No real work to do here except for diagnostics.
608 #ifndef PRODUCT
609 if (mark != markWord::INFLATING()) {
610 // Only do diagnostics if we are not racing an inflation. Simply
611 // exiting a recursive enter of a Java Monitor that is being
612 // inflated is safe; see the has_monitor() comment below.
613 assert(!mark.is_neutral(), "invariant");
614 assert(!mark.has_locker() ||
615 THREAD->is_lock_owned((address)mark.locker()), "invariant");
624 // does not own the Java Monitor.
625 ObjectMonitor* m = mark.monitor();
626 assert(((oop)(m->object()))->mark() == mark, "invariant");
627 assert(m->is_entered(THREAD), "invariant");
628 }
629 }
630 #endif
631 return;
632 }
633
634 if (mark == markWord::from_pointer(lock)) {
635 // If the object is stack-locked by the current thread, try to
636 // swing the displaced header from the BasicLock back to the mark.
637 assert(dhw.is_neutral(), "invariant");
638 if (object->cas_set_mark(dhw, mark) == mark) {
639 return;
640 }
641 }
642
643 // We have to take the slow-path of possible inflation and then exit.
644 ObjectMonitorHandle omh;
645 inflate(&omh, THREAD, object, inflate_cause_vm_internal);
646 omh.om_ptr()->exit(true, THREAD);
647 }
648
649 // -----------------------------------------------------------------------------
650 // Class Loader support to workaround deadlocks on the class loader lock objects
651 // Also used by GC
652 // complete_exit()/reenter() are used to wait on a nested lock
653 // i.e. to give up an outer lock completely and then re-enter
654 // Used when holding nested locks - lock acquisition order: lock1 then lock2
655 // 1) complete_exit lock1 - saving recursion count
656 // 2) wait on lock2
657 // 3) when notified on lock2, unlock lock2
658 // 4) reenter lock1 with original recursion count
659 // 5) lock lock2
660 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
661 intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
662 if (UseBiasedLocking) {
663 BiasedLocking::revoke(obj, THREAD);
664 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
665 }
666
667 ObjectMonitorHandle omh;
668 inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
669 intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
670 return ret_code;
671 }
672
673 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
674 void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
675 if (UseBiasedLocking) {
676 BiasedLocking::revoke(obj, THREAD);
677 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
678 }
679
680 ObjectMonitorHandle omh;
681 inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
682 omh.om_ptr()->reenter(recursions, THREAD);
683 }
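// An editorial sketch of the nested-lock protocol described above:
//
//   intx rec = ObjectSynchronizer::complete_exit(lock1, THREAD); // step 1
//   /* wait on lock2; when notified, unlock lock2 */             // 2 - 3
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);             // step 4
//   /* lock lock2 again as needed */                             // step 5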
684 // -----------------------------------------------------------------------------
685 // JNI locks on java objects
686 // NOTE: must use heavy weight monitor to handle jni monitor enter
687 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
688 // the current locking is from JNI instead of Java code
689 if (UseBiasedLocking) {
690 BiasedLocking::revoke(obj, THREAD);
691 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
692 }
693 THREAD->set_current_pending_monitor_is_from_java(false);
694 ObjectMonitorHandle omh;
695 inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
696 omh.om_ptr()->enter(THREAD);
697 THREAD->set_current_pending_monitor_is_from_java(true);
698 }
699
700 // NOTE: must use heavy weight monitor to handle jni monitor exit
701 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
702 if (UseBiasedLocking) {
703 Handle h_obj(THREAD, obj);
704 BiasedLocking::revoke(h_obj, THREAD);
705 obj = h_obj();
706 }
707 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
708
709 ObjectMonitorHandle omh;
710 inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
711 ObjectMonitor* monitor = omh.om_ptr();
712 // If this thread has locked the object, exit the monitor. We
713 // intentionally do not use CHECK here because we must exit the
714 // monitor even if an exception is pending.
715 if (monitor->check_owner(THREAD)) {
716 monitor->exit(true, THREAD);
717 }
718 }
719
720 // -----------------------------------------------------------------------------
721 // Internal VM locks on java objects
722 // standard constructor, allows locking failures
723 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
724 _dolock = do_lock;
725 _thread = thread;
726 _thread->check_for_valid_safepoint_state();
727 _obj = obj;
728
729 if (_dolock) {
730 ObjectSynchronizer::enter(_obj, &_lock, _thread);
731 }
732 }
733
734 ObjectLocker::~ObjectLocker() {
735 if (_dolock) {
736 ObjectSynchronizer::exit(_obj(), &_lock, _thread);
737 }
738 }
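// An editorial usage sketch: ObjectLocker gives VM-internal code scoped
// locking of a Java object, with the exit paired automatically by the
// destructor:
//
//   {
//     ObjectLocker ol(h_obj, self, true); // ObjectSynchronizer::enter()
//     /* critical section while the monitor is held */
//   }                                     // ~ObjectLocker -> exit()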
739
740
741 // -----------------------------------------------------------------------------
742 // Wait/Notify/NotifyAll
743 // NOTE: must use heavy weight monitor to handle wait()
744 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
745 if (UseBiasedLocking) {
746 BiasedLocking::revoke(obj, THREAD);
747 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
748 }
749 if (millis < 0) {
750 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
751 }
752 ObjectMonitorHandle omh;
753 inflate(&omh, THREAD, obj(), inflate_cause_wait);
754 ObjectMonitor* monitor = omh.om_ptr();
755
756 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
757 monitor->wait(millis, true, THREAD);
758
759 // This dummy call is in place to get around dtrace bug 6254741. Once
760 // that's fixed we can uncomment the following line, remove the call
761 // and change this function back into a "void" func.
762 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
763 int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
764 return ret_code;
765 }
766
767 void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
768 if (UseBiasedLocking) {
769 BiasedLocking::revoke(obj, THREAD);
770 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
771 }
772 if (millis < 0) {
773 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
774 }
775 ObjectMonitorHandle omh;
776 inflate(&omh, THREAD, obj(), inflate_cause_wait);
777 omh.om_ptr()->wait(millis, false, THREAD);
778 }
779
780 void ObjectSynchronizer::notify(Handle obj, TRAPS) {
781 if (UseBiasedLocking) {
782 BiasedLocking::revoke(obj, THREAD);
783 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
784 }
785
786 markWord mark = obj->mark();
787 if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
788 return;
789 }
790 ObjectMonitorHandle omh;
791 inflate(&omh, THREAD, obj(), inflate_cause_notify);
792 omh.om_ptr()->notify(THREAD);
793 }
794
795 // NOTE: see the comment for notify()
796 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
797 if (UseBiasedLocking) {
798 BiasedLocking::revoke(obj, THREAD);
799 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
800 }
801
802 markWord mark = obj->mark();
803 if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
804 return;
805 }
806 ObjectMonitorHandle omh;
807 inflate(&omh, THREAD, obj(), inflate_cause_notify);
808 omh.om_ptr()->notifyAll(THREAD);
809 }
810
811 // -----------------------------------------------------------------------------
812 // Hash Code handling
813 //
814 // Performance concern:
815 // OrderAccess::storestore() calls release() which at one time stored 0
816 // into the global volatile OrderAccess::dummy variable. This store was
817 // unnecessary for correctness. Many threads storing into a common location
818 // causes considerable cache migration or "sloshing" on large SMP systems.
819 // As such, I avoided using OrderAccess::storestore(). In some cases
820 // OrderAccess::fence() -- which incurs local latency on the executing
821 // processor -- is a better choice as it scales on SMP systems.
822 //
823 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
824 // a discussion of coherency costs. Note that all our current reference
825 // platforms provide strong ST-ST order, so the issue is moot on IA32,
826 // x64, and SPARC.
827 //
828 // As a general policy we use "volatile" to control compiler-based reordering
829 // and explicit fences (barriers) to control for architectural reordering
830 // performed by the CPU(s) or platform.
831
832 struct SharedGlobals {
833 char _pad_prefix[OM_CACHE_LINE_SIZE];
834 // These are highly shared mostly-read variables.
835 // To avoid false-sharing they need to be the sole occupants of a cache line.
836 volatile int stw_random;
837 volatile int stw_cycle;
838 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
839 // Hot RW variable -- Sequester to avoid false-sharing
840 volatile int hc_sequence;
841 DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
842 };
843
844 static SharedGlobals GVars;
845 static int _forceMonitorScavenge = 0; // Scavenge required and pending
846
847 static markWord read_stable_mark(oop obj) {
848 markWord mark = obj->mark();
849 if (!mark.is_being_inflated()) {
850 return mark; // normal fast-path return
851 }
852
853 int its = 0;
854 for (;;) {
855     mark = obj->mark();
856 if (!mark.is_being_inflated()) {
857 return mark; // normal fast-path return
858 }
859
860 // The object is being inflated by some other thread.
861 // The caller of read_stable_mark() must wait for inflation to complete.
981 Handle hobj(self, obj);
982 // Relaxing assertion for bug 6320749.
983 assert(Universe::verify_in_progress() ||
984 !SafepointSynchronize::is_at_safepoint(),
985 "biases should not be seen by VM thread here");
986 BiasedLocking::revoke(hobj, JavaThread::current());
987 obj = hobj();
988 assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
989 }
990 }
991
992 // hashCode() is a heap mutator ...
993 // Relaxing assertion for bug 6320749.
994 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
995 !SafepointSynchronize::is_at_safepoint(), "invariant");
996 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
997 self->is_Java_thread() , "invariant");
998 assert(Universe::verify_in_progress() || DumpSharedSpaces ||
999 ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
1000
1001 while (true) {
1002 ObjectMonitor* monitor = NULL;
1003 markWord temp, test;
1004 intptr_t hash;
1005 markWord mark = read_stable_mark(obj);
1006
1007 // object should remain ineligible for biased locking
1008 assert(!mark.has_bias_pattern(), "invariant");
1009
1010 if (mark.is_neutral()) { // if this is a normal header
1011 hash = mark.hash();
1012 if (hash != 0) { // if it has a hash, just return it
1013 return hash;
1014 }
1015 hash = get_next_hash(self, obj); // get a new hash
1016 temp = mark.copy_set_hash(hash); // merge the hash into header
1017 // try to install the hash
1018 test = obj->cas_set_mark(temp, mark);
1019 if (test == mark) { // if the hash was installed, return it
1020 return hash;
1021 }
1022 // Failed to install the hash. It could be that another thread
1023 // installed the hash just before our attempt or inflation has
1024 // occurred or... so we fall thru to inflate the monitor for
1025 // stability and then install the hash.
1026 } else if (mark.has_monitor()) {
1027 ObjectMonitorHandle omh;
1028 if (!omh.save_om_ptr(obj, mark)) {
1029 // Lost a race with async deflation so try again.
1030 assert(AsyncDeflateIdleMonitors, "sanity check");
1031 continue;
1032 }
1033 monitor = omh.om_ptr();
1034 temp = monitor->header();
1035 // Allow for a lagging install_displaced_markword_in_object() to
1036 // have marked the ObjectMonitor's header/dmw field.
1037 assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()),
1038 "invariant: header=" INTPTR_FORMAT, temp.value());
1039 hash = temp.hash();
1040 if (hash != 0) { // if it has a hash, just return it
1041 return hash;
1042 }
1043 // Fall thru so we only have one place that installs the hash in
1044 // the ObjectMonitor.
1045 } else if (self->is_lock_owned((address)mark.locker())) {
1046 // This is a stack lock owned by the calling thread so fetch the
1047 // displaced markWord from the BasicLock on the stack.
1048 temp = mark.displaced_mark_helper();
1049 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1050 hash = temp.hash();
1051 if (hash != 0) { // if it has a hash, just return it
1052 return hash;
1053 }
1054 // WARNING:
1055 // The displaced header in the BasicLock on a thread's stack
1056 // is strictly immutable. It CANNOT be changed in ANY cases.
1057 // So we have to inflate the stack lock into an ObjectMonitor
1058 // even if the current thread owns the lock. The BasicLock on
1059 // a thread's stack can be asynchronously read by other threads
1060 // during an inflate() call so any change to that stack memory
1061 // may not propagate to other threads correctly.
1062 }
1063
1064 // Inflate the monitor to set the hash.
1065 ObjectMonitorHandle omh;
1066 inflate(&omh, self, obj, inflate_cause_hash_code);
1067 monitor = omh.om_ptr();
1068 // Load ObjectMonitor's header/dmw field and see if it has a hash.
1069 mark = monitor->header();
1070 // Allow for a lagging install_displaced_markword_in_object() to
1071 // have marked the ObjectMonitor's header/dmw field.
1072 assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()),
1073 "invariant: header=" INTPTR_FORMAT, mark.value());
1074 hash = mark.hash();
1075 if (hash == 0) { // if it does not have a hash
1076 hash = get_next_hash(self, obj); // get a new hash
1077 temp = mark.copy_set_hash(hash); // merge the hash into header
1078 if (AsyncDeflateIdleMonitors && temp.is_marked()) {
1079 // A lagging install_displaced_markword_in_object() has marked
1080 // the ObjectMonitor's header/dmw field. We clear it to avoid
1081 // any confusion if we are able to set the hash.
1082 temp.set_unmarked();
1083 }
1084 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
1085 uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
1086 test = markWord(v);
1087 if (test != mark) {
1088 // The attempt to update the ObjectMonitor's header/dmw field
1089 // did not work. This can happen if another thread managed to
1090 // merge in the hash just before our cmpxchg(). With async
1091 // deflation, a lagging install_displaced_markword_in_object()
1092 // could have just marked or just unmarked the header/dmw field.
1093 // If we add any new usages of the header/dmw field, this code
1094 // will need to be updated.
1095 if (AsyncDeflateIdleMonitors) {
1096 // Since async deflation gives us two possible reasons for
1097         // the cmpxchg() to fail, it is easier to simply retry.
1098 continue;
1099 }
1100 hash = test.hash();
1101 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
1102 assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
1103 }
1104 }
1105 // We finally get the hash.
1106 return hash;
1107 }
1108 }
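
// A minimal sketch (illustrative only, not compiled) of the neutral-header
// hash installation protocol used by FastHashCode() above; 'self' and 'obj'
// are assumed to be in scope:
//
//   markWord mark = obj->mark();               // observe a neutral header
//   intptr_t hash = get_next_hash(self, obj);  // generate a new hash
//   markWord temp = mark.copy_set_hash(hash);  // merge the hash into a copy
//   if (obj->cas_set_mark(temp, mark) == mark) {
//     return hash;                             // we won the install race
//   }
//   // Another thread installed a hash or inflation occurred, so fall
//   // thru to inflate the monitor for stability and retry there.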
1109
1110 // Deprecated -- use FastHashCode() instead.
1111
1112 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
1113 return FastHashCode(Thread::current(), obj());
1114 }
1115
1116
1117 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
1118 Handle h_obj) {
1119 if (UseBiasedLocking) {
1120 BiasedLocking::revoke(h_obj, thread);
1121 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1122 }
1123
1124 assert(thread == JavaThread::current(), "Can only be called on current thread");
1125 oop obj = h_obj();
1126
1127 while (true) {
1128 markWord mark = read_stable_mark(obj);
1129
1130 // Uncontended case, header points to stack
1131 if (mark.has_locker()) {
1132 return thread->is_lock_owned((address)mark.locker());
1133 }
1134 // Contended case, header points to ObjectMonitor (tagged pointer)
1135 if (mark.has_monitor()) {
1136 ObjectMonitorHandle omh;
1137 if (!omh.save_om_ptr(obj, mark)) {
1138 // Lost a race with async deflation so try again.
1139 assert(AsyncDeflateIdleMonitors, "sanity check");
1140 continue;
1141 }
1142 bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
1143 return ret_code;
1144 }
1145 // Unlocked case, header in place
1146 assert(mark.is_neutral(), "sanity check");
1147 return false;
1148 }
1149 }
1150
1151 // Be aware that this method can revoke the bias of the lock object.
1152 // This method queries the ownership of the lock handle specified by 'h_obj'.
1153 // If the current thread owns the lock, it returns owner_self. If no
1154 // thread owns the lock, it returns owner_none. Otherwise, it will return
1155 // owner_other.
1156 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
1157 (JavaThread *self, Handle h_obj) {
1158 // The caller must beware this method can revoke bias, and
1159 // revocation can result in a safepoint.
1160 assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
1161 assert(self->thread_state() != _thread_blocked, "invariant");
1162
1163 // Possible mark states: neutral, biased, stack-locked, inflated
1164
1165 if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
1166 // CASE: biased
1167 BiasedLocking::revoke(h_obj, self);
1168 assert(!h_obj->mark().has_bias_pattern(),
1169 "biases should be revoked by now");
1170 }
1171
1172 assert(self == JavaThread::current(), "Can only be called on current thread");
1173 oop obj = h_obj();
1174
1175 while (true) {
1176 markWord mark = read_stable_mark(obj);
1177
1178 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
1179 if (mark.has_locker()) {
1180 return self->is_lock_owned((address)mark.locker()) ?
1181 owner_self : owner_other;
1182 }
1183
1184 // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
1185 // The Object:ObjectMonitor relationship is stable as long as we're
1186 // not at a safepoint and AsyncDeflateIdleMonitors is false.
1187 if (mark.has_monitor()) {
1188 ObjectMonitorHandle omh;
1189 if (!omh.save_om_ptr(obj, mark)) {
1190 // Lost a race with async deflation so try again.
1191 assert(AsyncDeflateIdleMonitors, "sanity check");
1192 continue;
1193 }
1194 ObjectMonitor* monitor = omh.om_ptr();
1195 void* owner = monitor->_owner;
1196 if (owner == NULL) return owner_none;
1197 return (owner == self ||
1198 self->is_lock_owned((address)owner)) ? owner_self : owner_other;
1199 }
1200
1201 // CASE: neutral
1202 assert(mark.is_neutral(), "sanity check");
1203 return owner_none; // it's unlocked
1204 }
1205 }
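
// A minimal usage sketch (illustrative only, not compiled) for
// query_lock_ownership(); 'self' and 'h_obj' are assumed to be in scope:
//
//   switch (ObjectSynchronizer::query_lock_ownership(self, h_obj)) {
//     case ObjectSynchronizer::owner_self:  ...  // calling thread owns it
//     case ObjectSynchronizer::owner_none:  ...  // no thread owns it
//     case ObjectSynchronizer::owner_other: ...  // another thread owns it
//   }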
1206
1207 // FIXME: jvmti should call this
1208 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
1209 if (UseBiasedLocking) {
1210 if (SafepointSynchronize::is_at_safepoint()) {
1211 BiasedLocking::revoke_at_safepoint(h_obj);
1212 } else {
1213 BiasedLocking::revoke(h_obj, JavaThread::current());
1214 }
1215 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
1216 }
1217
1218 oop obj = h_obj();
1219
1220 while (true) {
1221 address owner = NULL;
1222 markWord mark = read_stable_mark(obj);
1223
1224 // Uncontended case, header points to stack
1225 if (mark.has_locker()) {
1226 owner = (address) mark.locker();
1227 }
1228
1229 // Contended case, header points to ObjectMonitor (tagged pointer)
1230 else if (mark.has_monitor()) {
1231 ObjectMonitorHandle omh;
1232 if (!omh.save_om_ptr(obj, mark)) {
1233 // Lost a race with async deflation so try again.
1234 assert(AsyncDeflateIdleMonitors, "sanity check");
1235 continue;
1236 }
1237 ObjectMonitor* monitor = omh.om_ptr();
1238 assert(monitor != NULL, "monitor should be non-null");
1239 owner = (address) monitor->owner();
1240 }
1241
1242 if (owner != NULL) {
1243 // owning_thread_from_monitor_owner() may also return NULL here
1244 return Threads::owning_thread_from_monitor_owner(t_list, owner);
1245 }
1246
1247 // Unlocked case, header in place
1248     // We cannot assert mark.is_neutral() here since this object may
1249     // have been locked by another thread by the time we reach here.
1250 // assert(mark.is_neutral(), "sanity check");
1251
1252 return NULL;
1253 }
1254 }
1255
1256 // Visitors ...
1257
1258 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
1259 PaddedObjectMonitor* block = Atomic::load(&g_block_list);
1260 while (block != NULL) {
1261 assert(block->object() == CHAINMARKER, "must be a block header");
1262 for (int i = _BLOCKSIZE - 1; i > 0; i--) {
1263 ObjectMonitor* mid = (ObjectMonitor *)(block + i);
1264 ObjectMonitorHandle omh;
1265 if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
1266 // The ObjectMonitor* is not free and it has been made safe.
1267 if (mid->object() == NULL) {
1268 // Only process with closure if the object is set.
1269 continue;
1270 }
1271 closure->do_monitor(mid);
1272 }
1273 }
1274 // unmarked_next() is not needed with g_block_list (no locking
1275     // used with block linkage _next_om fields).
1276 block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
1277 }
1278 }
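
// A minimal sketch (illustrative only, not compiled) of a MonitorClosure
// that could be passed to monitors_iterate(); the counting logic is an
// assumption for illustration:
//
//   class CountOwnedMonitors : public MonitorClosure {
//    public:
//     int _count;
//     CountOwnedMonitors() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) {
//       if (mid->owner() != NULL) _count++;  // monitor has an owner
//     }
//   };
//   CountOwnedMonitors cl;
//   ObjectSynchronizer::monitors_iterate(&cl);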
1279
1280 static bool monitors_used_above_threshold() {
1281 if (Atomic::load(&LVars.population) == 0) {
1282 return false;
1283 }
1284 if (MonitorUsedDeflationThreshold > 0) {
1285 int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
1286 if (HandshakeAfterDeflateIdleMonitors) {
1287 monitors_used -= Atomic::load(&LVars.wait_count);
1288 }
1289 int monitor_usage = (monitors_used * 100LL) / Atomic::load(&LVars.population);
1290 return monitor_usage > MonitorUsedDeflationThreshold;
1291 }
1292 return false;
1293 }
1294
1295 // Returns true if MonitorBound is set (> 0) and if the specified
1296 // cnt is > MonitorBound. Otherwise returns false.
1297 static bool is_MonitorBound_exceeded(const int cnt) {
1298 const int mx = MonitorBound;
1299 return mx > 0 && cnt > mx;
1300 }
1301
1302 bool ObjectSynchronizer::is_async_deflation_needed() {
1303 if (!AsyncDeflateIdleMonitors) {
1304 return false;
1305 }
1306 if (is_async_deflation_requested()) {
1307 // Async deflation request.
1308 return true;
1309 }
1310 if (AsyncDeflationInterval > 0 &&
1311 time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
1312 monitors_used_above_threshold()) {
1313 // It's been longer than our specified deflate interval and there
1314 // are too many monitors in use. We don't deflate more frequently
1315 // than AsyncDeflationInterval (unless is_async_deflation_requested)
1316 // in order to not swamp the ServiceThread.
1317 _last_async_deflation_time_ns = os::javaTimeNanos();
1318 return true;
1319 }
1320 int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
1321 if (HandshakeAfterDeflateIdleMonitors) {
1322 monitors_used -= Atomic::load(&LVars.wait_count);
1323 }
1324 if (is_MonitorBound_exceeded(monitors_used)) {
1325 // Not enough ObjectMonitors on the global free list.
1326 return true;
1327 }
1328 return false;
1329 }
1330
1331 bool ObjectSynchronizer::needs_monitor_scavenge() {
1332 if (Atomic::load(&_forceMonitorScavenge) == 1) {
1333 log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
1334 return true;
1335 }
1336 return false;
1337 }
1338
1339 bool ObjectSynchronizer::is_safepoint_deflation_needed() {
1340 if (!AsyncDeflateIdleMonitors) {
1341 if (monitors_used_above_threshold()) {
1342 // Too many monitors in use.
1343 return true;
1344 }
1345 return needs_monitor_scavenge();
1346 }
1347 if (is_special_deflation_requested()) {
1348 // For AsyncDeflateIdleMonitors only do a safepoint deflation
1349 // if there is a special deflation request.
1350 return true;
1351 }
1352 return false;
1353 }
1354
1355 jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
1356 return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
1357 }
1358
1359 void ObjectSynchronizer::oops_do(OopClosure* f) {
1360 // We only scan the global used list here (for moribund threads), and
1361 // the thread-local monitors in Thread::oops_do().
1362 global_used_oops_do(f);
1363 }
1364
1365 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1366 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1367 list_oops_do(Atomic::load(&LVars.in_use_list), Atomic::load(&LVars.in_use_count), f);
1368 }
1369
1370 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1371 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1372 list_oops_do(thread->om_in_use_list, thread->om_in_use_count, f);
1373 }
1374
1375 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
1376 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1377 // The oops_do() phase does not overlap with monitor deflation
1378 // so no need to update the ObjectMonitor's ref_count for this
1379 // ObjectMonitor* use and no need to mark ObjectMonitors for the
1380 // list traversal.
1381 for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
1382 if (mid->object() != NULL) {
1383 f->do_oop((oop*)mid->object_addr());
1384 }
1385 }
1386 }
1387
1388
1389 // -----------------------------------------------------------------------------
1390 // ObjectMonitor Lifecycle
1391 // -----------------------
1392 // Inflation unlinks monitors from LVars.free_list or a per-thread free
1393 // list and associates them with objects. Deflation -- which occurs at
1394 // STW-time or asynchronously -- disassociates idle monitors from objects.
1395 // Such scavenged monitors are returned to the LVars.free_list.
1396 //
1397 // ObjectMonitors reside in type-stable memory (TSM) and are immortal.
1398 //
1399 // Lifecycle:
1400 // -- unassigned and on the LVars.free_list
1401 // -- unassigned and on a per-thread free list
1402 // -- assigned to an object. The object is inflated and the mark refers
1403 // to the ObjectMonitor.
1404
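// A minimal sketch (illustrative only) of the allocation-state transitions
// implied by the lifecycle above, using the states this file manipulates
// via set_allocation_state():
//
//   Free --om_alloc()--------------------> New
//   New  --inflate() succeeds------------> Old
//   New  --om_release() (lost CAS race)--> Free
//   Old  --deflation---------------------> Free  (back on a free list)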
1405
1406 // Constraining monitor pool growth via MonitorBound ...
1407 //
1408 // If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
1409 //
1410 // When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
1411 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but the
1412 // rate of scavenging is driven primarily by GC. As such, we can find
1413 // an inordinate number of monitors in circulation.
1414 // To avoid that scenario we can artificially induce a STW safepoint
1415 // if the pool appears to be growing past some reasonable bound.
1416 // Generally we favor time in space-time tradeoffs, but as there's no
1417 // natural back-pressure on the # of extant monitors we need to impose some
1418 // type of limit. Beware that if MonitorBound is set to too low a value
1419 // we could just loop. In addition, if MonitorBound is set to a low value
1420 // we'll incur more safepoints, which are harmful to performance.
1421 // See also: GuaranteedSafepointInterval
1422 //
1423 // When safepoint deflation is being used and MonitorBound is set, the
1424 // boundary applies to
1425 // (LVars.population - LVars.free_count)
1426 // i.e., if there are not enough ObjectMonitors on the global free list,
1427 // then a safepoint deflation is induced. Picking a good MonitorBound value
1428 // is non-trivial.
1429 //
1430 // When async deflation is being used:
1431 // The monitor pool is still grow-only. Async deflation is requested
1432 // by a safepoint's cleanup phase or by the ServiceThread at periodic
1433 // intervals when is_async_deflation_needed() returns true. In
1434 // addition to other policies that are checked, if there are not
1435 // enough ObjectMonitors on the global free list, then
1436 // is_async_deflation_needed() will return true. The ServiceThread
1437 // calls deflate_global_idle_monitors_using_JT() and also calls
1438 // deflate_per_thread_idle_monitors_using_JT() as needed.
1439
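// A minimal sketch (illustrative only) of the MonitorBound check described
// above, matching is_MonitorBound_exceeded() and its callers:
//
//   int monitors_used = LVars.population - LVars.free_count;
//   if (HandshakeAfterDeflateIdleMonitors) {
//     monitors_used -= LVars.wait_count;  // async deflation only
//   }
//   if (MonitorBound > 0 && monitors_used > MonitorBound) {
//     // safepoint mode: InduceScavenge(); async mode: request deflation.
//   }
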
1440 static void InduceScavenge(Thread* self, const char * Whence) {
1441 assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");
1442
1443 // Induce STW safepoint to trim monitors
1444 // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
1445 // More precisely, trigger a cleanup safepoint as the number
1446 // of active monitors passes the specified threshold.
1447 // TODO: assert thread state is reasonable
1448
1449 if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
1450 VMThread::check_for_forced_cleanup();
1451 }
1452 }
1453
1454 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self,
1455 const InflateCause cause) {
1456 // A large MAXPRIVATE value reduces both list lock contention
1457 // and list coherency traffic, but also tends to increase the
1458 // number of ObjectMonitors in circulation as well as the STW
1459 // scavenge costs. As usual, we lean toward time in space-time
1460 // tradeoffs.
1461 const int MAXPRIVATE = 1024;
1462
1463 stringStream ss;
1464 for (;;) {
1465 ObjectMonitor* m;
1466
1467 // 1: try to allocate from the thread's local om_free_list.
1468 // Threads will attempt to allocate first from their local list, then
1469 // from the global list, and only after those attempts fail will the
1470 // thread attempt to instantiate new monitors. Thread-local free lists
1471 // improve allocation latency, as well as reducing coherency traffic
1472 // on the shared global list.
1473 m = take_from_start_of_om_free_list(self);
1474 if (m != NULL) {
1475 guarantee(m->object() == NULL, "invariant");
1476 m->set_allocation_state(ObjectMonitor::New);
1477 prepend_to_om_in_use_list(self, m);
1478 return m;
1479 }
1480
1481 // 2: try to allocate from the global LVars.free_list
1482 // CONSIDER: use muxTry() instead of muxAcquire().
1483 // If the muxTry() fails then drop immediately into case 3.
1484 // If we're using thread-local free lists then try
1485 // to reprovision the caller's free list.
1486 if (Atomic::load(&LVars.free_list) != NULL) {
1487 // Reprovision the thread's om_free_list.
1488 // Use bulk transfers to reduce the allocation rate and heat
1489 // on various locks.
1490 for (int i = self->om_free_provision; --i >= 0;) {
1491 ObjectMonitor* take = take_from_start_of_global_free_list();
1492 if (take == NULL) {
1493 break; // No more are available.
1494 }
1495 guarantee(take->object() == NULL, "invariant");
1496 if (AsyncDeflateIdleMonitors) {
1497 // We allowed 3 field values to linger during async deflation.
1498 // We clear header and restore ref_count here, but we leave
1499 // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
1500 // enter optimization can no longer race with async deflation
1501 // and reuse.
1502 take->set_header(markWord::zero());
1503 if (take->ref_count() < 0) {
1504 // Add back max_jint to restore the ref_count field to its
1505 // proper value.
1506 Atomic::add(&take->_ref_count, max_jint);
1507
1508 #ifdef ASSERT
1509 jint l_ref_count = take->ref_count();
1510 #endif
1511 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
1512 l_ref_count, take->ref_count());
1513 }
1514 }
1515 take->Recycle();
1516 // Since we're taking from the global free-list, take must be Free.
1517 // om_release() also sets the allocation state to Free because it
1518 // is called from other code paths.
1519 assert(take->is_free(), "invariant");
1520 om_release(self, take, false);
1521 }
1522 self->om_free_provision += 1 + (self->om_free_provision / 2);
1523 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1524
1525 if (!AsyncDeflateIdleMonitors &&
1526 is_MonitorBound_exceeded(Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count))) {
1527 // Not enough ObjectMonitors on the global free list.
1528 // We can't safely induce a STW safepoint from om_alloc() as our thread
1529 // state may not be appropriate for such activities and callers may hold
1530 // naked oops, so instead we defer the action.
1531 InduceScavenge(self, "om_alloc");
1532 }
1533 continue;
1534 }
1535
1536 // 3: allocate a block of new ObjectMonitors
1537 // Both the local and global free lists are empty -- resort to malloc().
1538 // In the current implementation ObjectMonitors are TSM - immortal.
1539     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1540 // each ObjectMonitor to start at the beginning of a cache line,
1541 // so we use align_up().
1542 // A better solution would be to use C++ placement-new.
1543 // BEWARE: As it stands currently, we don't run the ctors!
1544 assert(_BLOCKSIZE > 1, "invariant");
1545 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1546 PaddedObjectMonitor* temp;
1547 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1548 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1549 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1550 (void)memset((void *) temp, 0, neededsize);
1551
1552 // Format the block.
1553     // Initialize the linked list: each monitor points to its next,
1554     // forming the singly linked free list. The very first monitor
1555     // will point to the next block, which forms the block list.
1556 // The trick of using the 1st element in the block as g_block_list
1557 // linkage should be reconsidered. A better implementation would
1558 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1559
1560 for (int i = 1; i < _BLOCKSIZE; i++) {
1561 temp[i]._next_om = (ObjectMonitor*)&temp[i + 1];
1562 assert(temp[i].is_free(), "invariant");
1563 }
1564
1565     // Terminate the last monitor as the end of the list.
1566 temp[_BLOCKSIZE - 1]._next_om = (ObjectMonitor*)NULL;
1567
1568 // Element [0] is reserved for global list linkage
1569 temp[0].set_object(CHAINMARKER);
1570
1571 // Consider carving out this thread's current request from the
1572 // block in hand. This avoids some lock traffic and redundant
1573 // list activity.
1574
1575 prepend_block_to_lists(temp);
1576 }
1577 }
1578
1579 // Place "m" on the caller's private per-thread om_free_list.
1580 // In practice there's no need to clamp or limit the number of
1581 // monitors on a thread's om_free_list as the only non-allocation time
1582 // we'll call om_release() is to return a monitor to the free list after
1583 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1584 // accumulate on a thread's free list.
1585 //
1586 // Key constraint: all ObjectMonitors on a thread's free list and the global
1587 // free list must have their object field set to null. This prevents the
1588 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1589 // -- from reclaiming them while we are trying to release them.
1590
1591 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1592 bool from_per_thread_alloc) {
1593 guarantee(m->header().value() == 0, "invariant");
1594 guarantee(m->object() == NULL, "invariant");
1595 stringStream ss;
1596 guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1597 "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
1598 m->_recursions);
1599 m->set_allocation_state(ObjectMonitor::Free);
1600 // _next_om is used for both per-thread in-use and free lists so
1601 // we have to remove 'm' from the in-use list first (as needed).
1602 if (from_per_thread_alloc) {
1603 // Need to remove 'm' from om_in_use_list.
1604 // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
1605 // protocol because async deflation can do list deletions in parallel.
1606 ObjectMonitor* cur_mid_in_use = NULL;
1607 ObjectMonitor* mid = NULL;
1608 ObjectMonitor* next = NULL;
1609 bool extracted = false;
1610
1611 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1612 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1613 }
1614 next = unmarked_next(mid);
1615 while (true) {
1616 if (m == mid) {
1617 // We found 'm' on the per-thread in-use list so try to extract it.
1618 if (cur_mid_in_use == NULL) {
1619 // mid is the list head and it is locked. Switch the list head
1620 // to next which unlocks the list head, but leaves mid locked:
1621 Atomic::store(&self->om_in_use_list, next);
1622 } else {
1623 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
1624 // next field to next which unlocks cur_mid_in_use, but leaves
1625 // mid locked:
1626 set_next(cur_mid_in_use, next);
1627 }
1628 extracted = true;
1629 Atomic::dec(&self->om_in_use_count);
1630 // Unlock mid, but leave the next value for any lagging list
1631 // walkers. It will get cleaned up when mid is prepended to
1632 // the thread's free list:
1633 om_unlock(mid);
1634 break;
1635 }
1636 if (cur_mid_in_use != NULL) {
1637 om_unlock(cur_mid_in_use);
1638 }
1639 // The next cur_mid_in_use keeps mid's locked state so
1640 // that it is stable for a possible next field change. It
1641 // cannot be deflated while it is locked.
1642 cur_mid_in_use = mid;
1643 mid = next;
1644 if (mid == NULL) {
1645 // Reached end of the list and didn't find m so:
1646         fatal("must find m=" INTPTR_FORMAT " on om_in_use_list=" INTPTR_FORMAT,
1647 p2i(m), p2i(self->om_in_use_list));
1648 }
1649 // Lock mid so we can possibly extract it:
1650 om_lock(mid);
1651 next = unmarked_next(mid);
1652 }
1653 }
1654
1655 prepend_to_om_free_list(self, m);
1656 guarantee(m->is_free(), "invariant");
1657 }
1658
1659 // Return ObjectMonitors on a moribund thread's free and in-use
1660 // lists to the appropriate global lists. The ObjectMonitors on the
1661 // per-thread in-use list may still be in use by other threads.
1662 //
1663 // We currently call om_flush() from Threads::remove() before the
1664 // thread has been excised from the thread list and is no longer a
1665 // mutator. This means that om_flush() cannot run concurrently with
1666 // a safepoint and interleave with deflate_idle_monitors(). In
1667 // particular, this ensures that the thread's in-use monitors are
1668 // scanned by a GC safepoint, either via Thread::oops_do() (before
1669 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1670 // om_flush() is called).
1671 //
1672 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
1673 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
1674 // run at the same time as om_flush() so we have to follow a careful
1675 // protocol to prevent list corruption.
1676
1677 void ObjectSynchronizer::om_flush(Thread* self) {
1678 // This function can race with an async deflater thread. Since
1679 // deflation has to process the per-thread in-use list before
1680 // prepending the deflated ObjectMonitors to the global free list,
1681 // we process the per-thread lists in the same order to prevent
1682 // ordering races.
1683 int in_use_count = 0;
1684 ObjectMonitor* in_use_list = NULL;
1685 ObjectMonitor* in_use_tail = NULL;
1686
1687 // An async deflation thread checks to see if the target thread
1688 // is exiting, but if it has made it past that check before we
1689 // started exiting, then it is racing to get to the in-use list.
1690 if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1691 // At this point, we have marked the in-use list head so an
1692 // async deflation thread cannot come in after us. If an async
1693 // deflation thread is ahead of us, then we'll detect that and
1694 // wait for it to finish its work.
1695 //
1696 // The thread is going away, however the ObjectMonitors on the
1697 // om_in_use_list may still be in-use by other threads. Link
1698 // them to in_use_tail, which will be linked into the global
1699 // in-use list (LVars.in_use_list) below.
1700 //
1701 // Account for the in-use list head before the loop since it is
1702 // already marked (by this thread):
1703 in_use_tail = in_use_list;
1704 in_use_count++;
1705 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
1706 if (is_locked(cur_om)) {
1707 // cur_om is locked so there must be an async deflater
1708 // thread ahead of us so we'll give it a chance to finish.
1709 while (is_locked(cur_om)) {
1710 os::naked_short_sleep(1);
1711 }
1712 // Refetch the possibly changed next field and try again.
1713 cur_om = unmarked_next(in_use_tail);
1714 continue;
1715 }
1716 if (cur_om->is_free()) {
1717 // cur_om was deflated and the allocation state was changed
1718 // to Free while it was marked. We happened to see it just
1719 // after it was unmarked (and added to the free list).
1720 // Refetch the possibly changed next field and try again.
1721 cur_om = unmarked_next(in_use_tail);
1722 continue;
1723 }
1724 in_use_tail = cur_om;
1725 in_use_count++;
1726 cur_om = unmarked_next(cur_om);
1727 }
1728 guarantee(in_use_tail != NULL, "invariant");
1729 int l_om_in_use_count = self->om_in_use_count;
1730 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
1731 "match: l_om_in_use_count=%d, in_use_count=%d",
1732 l_om_in_use_count, in_use_count);
1733 self->om_in_use_count = 0;
1734 // Clear the in-use list head (which also unlocks it):
1735 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
1736 om_unlock(in_use_list);
1737 }
1738
1739 int free_count = 0;
1740 ObjectMonitor* free_list = self->om_free_list;
1741 ObjectMonitor* free_tail = NULL;
1742 if (free_list != NULL) {
1743 // The thread is going away. Set 'free_tail' to the last per-thread free
1744 // monitor which will be linked to LVars.free_list below.
1745 stringStream ss;
1746 for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) {
1747 free_count++;
1748 free_tail = s;
1749 guarantee(s->object() == NULL, "invariant");
1750 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
1751 }
1752 guarantee(free_tail != NULL, "invariant");
1753 int l_om_free_count = self->om_free_count;
1754 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
1755 "l_om_free_count=%d, free_count=%d", l_om_free_count,
1756 free_count);
1757 self->om_free_count = 0;
1758 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
1759 }
1760
1761 if (free_tail != NULL) {
1762 prepend_list_to_global_free_list(free_list, free_tail, free_count);
1763 }
1764
1765 if (in_use_tail != NULL) {
1766 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
1767 }
1768
1769 LogStreamHandle(Debug, monitorinflation) lsh_debug;
1770 LogStreamHandle(Info, monitorinflation) lsh_info;
1771 LogStream* ls = NULL;
1772 if (log_is_enabled(Debug, monitorinflation)) {
1773 ls = &lsh_debug;
1774 } else if ((free_count != 0 || in_use_count != 0) &&
1775 log_is_enabled(Info, monitorinflation)) {
1776 ls = &lsh_info;
1777 }
1778 if (ls != NULL) {
1779 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
1780 ", in_use_count=%d" ", om_free_provision=%d",
1781 p2i(self), free_count, in_use_count, self->om_free_provision);
1782 }
1783 }
1784
1785 static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
1786 const oop obj,
1787 ObjectSynchronizer::InflateCause cause) {
1788 assert(event != NULL, "invariant");
1789 assert(event->should_commit(), "invariant");
1790 event->set_monitorClass(obj->klass());
1791 event->set_address((uintptr_t)(void*)obj);
1792 event->set_cause((u1)cause);
1793 event->commit();
1794 }
1795
1796 // Fast path code shared by multiple functions
1797 void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
1798 while (true) {
1799 markWord mark = obj->mark();
1800 if (mark.has_monitor()) {
1801 if (!omh_p->save_om_ptr(obj, mark)) {
1802 // Lost a race with async deflation so try again.
1803 assert(AsyncDeflateIdleMonitors, "sanity check");
1804 continue;
1805 }
1806 ObjectMonitor* monitor = omh_p->om_ptr();
1807 assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
1808 markWord dmw = monitor->header();
1809 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1810 return;
1811 }
1812 inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
1813 return;
1814 }
1815 }
1816
1817 void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
1818 oop object, const InflateCause cause) {
1819 // Inflate mutates the heap ...
1820 // Relaxing assertion for bug 6320749.
1821 assert(Universe::verify_in_progress() ||
1822 !SafepointSynchronize::is_at_safepoint(), "invariant");
1823
1824 EventJavaMonitorInflate event;
1825
1826 for (;;) {
1827 const markWord mark = object->mark();
1828 assert(!mark.has_bias_pattern(), "invariant");
1829
1830 // The mark can be in one of the following states:
1831 // * Inflated - just return
1832 // * Stack-locked - coerce it to inflated
1833 // * INFLATING - busy wait for conversion to complete
1834 // * Neutral - aggressively inflate the object.
1835 // * BIASED - Illegal. We should never see this
1836
1837 // CASE: inflated
1838 if (mark.has_monitor()) {
1839 if (!omh_p->save_om_ptr(object, mark)) {
1840 // Lost a race with async deflation so try again.
1841 assert(AsyncDeflateIdleMonitors, "sanity check");
1842 continue;
1843 }
1844 ObjectMonitor* inf = omh_p->om_ptr();
1845 markWord dmw = inf->header();
1846 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1847 assert(inf->object() == object, "invariant");
1848 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1849 return;
1850 }
1851
1852 // CASE: inflation in progress - inflating over a stack-lock.
1853 // Some other thread is converting from stack-locked to inflated.
1854 // Only that thread can complete inflation -- other threads must wait.
1855 // The INFLATING value is transient.
1856 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1857 // We could always eliminate polling by parking the thread on some auxiliary list.
1858 if (mark == markWord::INFLATING()) {
1859 read_stable_mark(object);
1860 continue;
1861 }
1862
1863 // CASE: stack-locked
1864 // Could be stack-locked either by this thread or by some other thread.
1865 //
1866 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1867 // to install INFLATING into the mark word. We originally installed INFLATING,
1868 // allocated the objectmonitor, and then finally STed the address of the
1869 // objectmonitor into the mark. This was correct, but artificially lengthened
1870     // the interval in which INFLATING appeared in the mark, thus increasing
1871 // the odds of inflation contention.
1872 //
1873 // We now use per-thread private objectmonitor free lists.
1874     // These lists are reprovisioned from the global free list outside the
1875 // critical INFLATING...ST interval. A thread can transfer
1876     // multiple objectmonitors en masse from the global free list to its local free list.
1877 // This reduces coherency traffic and lock contention on the global free list.
1878 // Using such local free lists, it doesn't matter if the om_alloc() call appears
1879 // before or after the CAS(INFLATING) operation.
1880 // See the comments in om_alloc().
1881
1882 LogStreamHandle(Trace, monitorinflation) lsh;
1883
1884 if (mark.has_locker()) {
1885 ObjectMonitor* m = om_alloc(self, cause);
1886 // Optimistically prepare the objectmonitor - anticipate successful CAS
1887 // We do this before the CAS in order to minimize the length of time
1888 // in which INFLATING appears in the mark.
1889 m->Recycle();
1890 m->_Responsible = NULL;
1891 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1892
1893 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1894 if (cmp != mark) {
1895 // om_release() will reset the allocation state from New to Free.
1896 om_release(self, m, true);
1897 continue; // Interference -- just retry
1898 }
1899
1900 // We've successfully installed INFLATING (0) into the mark-word.
1901 // This is the only case where 0 will appear in a mark-word.
1902 // Only the singular thread that successfully swings the mark-word
1903 // to 0 can perform (or more precisely, complete) inflation.
1904 //
1905 // Why do we CAS a 0 into the mark-word instead of just CASing the
1906 // mark-word from the stack-locked value directly to the new inflated state?
1907 // Consider what happens when a thread unlocks a stack-locked object.
1908 // It attempts to use CAS to swing the displaced header value from the
1909 // on-stack BasicLock back into the object header. Recall also that the
1910 // header value (hash code, etc) can reside in (a) the object header, or
1911 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1912 // header in an ObjectMonitor. The inflate() routine must copy the header
1913 // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1914 // the while preserving the hashCode stability invariants. If the owner
1915 // decides to release the lock while the value is 0, the unlock will fail
1916 // and control will eventually pass from slow_exit() to inflate. The owner
1917 // will then spin, waiting for the 0 value to disappear. Put another way,
1918 // the 0 causes the owner to stall if the owner happens to try to
1919 // drop the lock (restoring the header from the BasicLock to the object)
1920       // while inflation is in-progress. This protocol avoids races that
1921 // would otherwise permit hashCode values to change or "flicker" for an object.
1922 // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
1923 // 0 serves as a "BUSY" inflate-in-progress indicator.
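
      // A minimal sketch (illustrative only) of the stack-unlock CAS that
      // the transient 0 value stalls; 'lock' stands for the owner's
      // on-stack BasicLock and the exact names are assumptions:
      //
      //   markWord dhw = lock->displaced_header();
      //   // Fails while the mark is INFLATING() (0), so the owner must
      //   // spin/retry until inflation completes:
      //   object->cas_set_mark(dhw, markWord::from_pointer(lock));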
1924
1925
1926 // fetch the displaced mark from the owner's stack.
1927 // The owner can't die or unwind past the lock while our INFLATING
1928 // object is in the mark. Furthermore the owner can't complete
1929 // an unlock on the object, either.
1930 markWord dmw = mark.displaced_mark_helper();
1931 // Catch if the object's header is not neutral (not locked and
1932 // not marked is what we care about here).
1933 ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1934
1935 // Setup monitor fields to proper values -- prepare the monitor
1936 m->set_header(dmw);
1937
1938 // Optimization: if the mark.locker stack address is associated
1939 // with this thread we could simply set m->_owner = self.
1940 // Note that a thread can inflate an object
1941 // that it has stack-locked -- as might happen in wait() -- directly
1942 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1943 if (AsyncDeflateIdleMonitors) {
1944 m->simply_set_owner_from(mark.locker(), NULL, DEFLATER_MARKER);
1945 } else {
1946 m->simply_set_owner_from(mark.locker(), NULL);
1947 }
1948 m->set_object(object);
1949 // TODO-FIXME: assert BasicLock->dhw != 0.
1950
1951 omh_p->set_om_ptr(m);
1952
1953 // Must preserve store ordering. The monitor state must
1954 // be stable at the time of publishing the monitor address.
1955 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1956 object->release_set_mark(markWord::encode(m));
1957
1958 // Once ObjectMonitor is configured and the object is associated
1959 // with the ObjectMonitor, it is safe to allow async deflation:
1960 assert(m->is_new(), "freshly allocated monitor must be new");
1961 m->set_allocation_state(ObjectMonitor::Old);
1962
1963 // Hopefully the performance counters are allocated on distinct cache lines
1964 // to avoid false sharing on MP systems ...
1965 OM_PERFDATA_OP(Inflations, inc());
1966 if (log_is_enabled(Trace, monitorinflation)) {
1967 ResourceMark rm(self);
1968 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1969 INTPTR_FORMAT ", type='%s'", p2i(object),
1970 object->mark().value(), object->klass()->external_name());
1971 }
1972 if (event.should_commit()) {
1973 post_monitor_inflate_event(&event, object, cause);
1974 }
1975 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
1976 return;
1977 }
1978
1979 // CASE: neutral
1980 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1981 // If we know we're inflating for entry it's better to inflate by swinging a
1982 // pre-locked ObjectMonitor pointer into the object header. A successful
1983 // CAS inflates the object *and* confers ownership to the inflating thread.
1984 // In the current implementation we use a 2-step mechanism where we CAS()
1985 // to inflate and then CAS() again to try to swing _owner from NULL to self.
1986 // An inflateTry() method that we could call from enter() would be useful.
1987
1988 // Catch if the object's header is not neutral (not locked and
1989 // not marked is what we care about here).
1990     ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
1991 ObjectMonitor* m = om_alloc(self, cause);
1992 // prepare m for installation - set monitor to initial state
1993 m->Recycle();
1994 m->set_header(mark);
1995 // If we leave _owner == DEFLATER_MARKER here, then the simple C2
1996 // ObjectMonitor enter optimization can no longer race with async
1997 // deflation and reuse.
1998 m->set_object(object);
1999 m->_Responsible = NULL;
2000 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class
2001
2002 omh_p->set_om_ptr(m);
2003
2004 if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
2005 m->set_header(markWord::zero());
2006 m->set_object(NULL);
2007 m->Recycle();
2008 omh_p->set_om_ptr(NULL);
2009 // om_release() will reset the allocation state from New to Free.
2010 om_release(self, m, true);
2011 m = NULL;
2012       // Interference - the markword changed - just retry.
2013       // The state-transitions are one-way, so there's no chance of
2014       // live-lock -- "Inflated" is an absorbing state.
2015       continue;
2016 }
2017
2018 // Once the ObjectMonitor is configured and object is associated
2019 // with the ObjectMonitor, it is safe to allow async deflation:
2020 assert(m->is_new(), "freshly allocated monitor must be new");
2021 m->set_allocation_state(ObjectMonitor::Old);
2022
2023 // Hopefully the performance counters are allocated on distinct
2024 // cache lines to avoid false sharing on MP systems ...
2025 OM_PERFDATA_OP(Inflations, inc());
2026 if (log_is_enabled(Trace, monitorinflation)) {
2027 ResourceMark rm(self);
2028 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
2029 INTPTR_FORMAT ", type='%s'", p2i(object),
2030 object->mark().value(), object->klass()->external_name());
2031 }
2032 if (event.should_commit()) {
2033 post_monitor_inflate_event(&event, object, cause);
2034 }
2035 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
2036 return;
2037 }
2038 }
2039
2040
2041 // We maintain a list of in-use monitors for each thread.
2042 //
2043 // For safepoint based deflation:
2044 // deflate_thread_local_monitors() scans a single thread's in-use list, while
2045 // deflate_idle_monitors() scans only a global list of in-use monitors which
2046 // is populated only as a thread dies (see om_flush()).
2047 //
2048 // These operations are called at all safepoints, immediately after mutators
2049 // are stopped, but before any objects have moved. Collectively they traverse
2050 // the population of in-use monitors, deflating where possible. The scavenged
2051 // monitors are returned to the global monitor free list.
2052 //
2053 // Beware that we scavenge at *every* stop-the-world point. Having a large
2054 // number of monitors in-use could negatively impact performance. We also want
2055 // to minimize the total # of monitors in circulation, as they incur a small
2056 // footprint penalty.
2057 //
2058 // Perversely, the heap size -- and thus the STW safepoint rate --
2059 // typically drives the scavenge rate. Large heaps can mean infrequent GC,
2060 // which in turn can mean large(r) numbers of ObjectMonitors in circulation.
2061 // This is an unfortunate aspect of this design.
2062 //
2063 // For async deflation:
2064 // If a special deflation request is made, then the safepoint based
2065 // deflation mechanism is used. Otherwise, an async deflation request
2066 // is registered with the ServiceThread and it is notified.
2067
2068 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
2069 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2070
2071 // The per-thread in-use lists are handled in
2072 // ParallelSPCleanupThreadClosure::do_thread().
2073
2074 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
2075 // Use the older mechanism for the global in-use list or if a
2076 // special deflation has been requested before the safepoint.
2077 ObjectSynchronizer::deflate_idle_monitors(counters);
2078 return;
2079 }
2080
2081 log_debug(monitorinflation)("requesting async deflation of idle monitors.");
2082 // Request deflation of idle monitors by the ServiceThread:
2083 set_is_async_deflation_requested(true);
2084 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
2085 ml.notify_all();
2086
2087 if (log_is_enabled(Debug, monitorinflation)) {
2088 // exit_globals()'s call to audit_and_print_stats() is done
2089 // at the Info level and not at a safepoint.
2090 // For safepoint based deflation, audit_and_print_stats() is called
2091 // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
2092 // Debug level at a safepoint.
2093 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2094 }
2095 }
2096
2097 // Deflate a single monitor if not in-use
2098 // Return true if deflated, false if in-use
2099 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
2100 ObjectMonitor** free_head_p,
2101 ObjectMonitor** free_tail_p) {
2102 bool deflated;
2103 // Normal case ... The monitor is associated with obj.
2104 const markWord mark = obj->mark();
2105 guarantee(mark == markWord::encode(mid), "should match: mark="
2106 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
2107 markWord::encode(mid).value());
2108 // Make sure that mark.monitor() and markWord::encode() agree:
2109 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
2110 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
2111 const markWord dmw = mid->header();
2112 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
2113
2114 if (mid->is_busy() || mid->ref_count() != 0) {
2115 // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2116 // is in use so no deflation.
2117 deflated = false;
2118 } else {
2119 // Deflate the monitor if it is no longer being used
2120 // It's idle - scavenge and return to the global free list
2121 // plain old deflation ...
2122 if (log_is_enabled(Trace, monitorinflation)) {
2123 ResourceMark rm;
2124 log_trace(monitorinflation)("deflate_monitor: "
2125 "object=" INTPTR_FORMAT ", mark="
2126 INTPTR_FORMAT ", type='%s'", p2i(obj),
2127 mark.value(), obj->klass()->external_name());
2128 }
2129
2130 // Restore the header back to obj
2131 obj->release_set_mark(dmw);
2132 if (AsyncDeflateIdleMonitors) {
2133 // clear() expects the owner field to be NULL and we won't race
2134 // with the simple C2 ObjectMonitor enter optimization since
2135 // we're at a safepoint. DEFLATER_MARKER is the only non-NULL
2136 // value we should see here.
2137 mid->try_set_owner_from(NULL, DEFLATER_MARKER);
2138 }
2139 mid->clear();
2140
2141 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
2142 p2i(mid->object()));
2143 assert(mid->is_free(), "invariant");
2144
2145 // Move the deflated ObjectMonitor to the working free list
2146 // defined by free_head_p and free_tail_p. No races on this list
2147 // so no need for load_acquire() or store_release().
2148 if (*free_head_p == NULL) *free_head_p = mid;
2149 if (*free_tail_p != NULL) {
2150 // We append to the list so the caller can use mid->_next_om
2151 // to fix the linkages in its context.
2152 ObjectMonitor* prevtail = *free_tail_p;
2153 // Should have been cleaned up by the caller:
2154 // Note: Should not have to lock prevtail here since we're at a
2155 // safepoint and ObjectMonitors on the local free list should
2156 // not be accessed in parallel.
2157 assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
2158 INTPTR_FORMAT, p2i(prevtail->_next_om));
2159 set_next(prevtail, mid);
2160 }
2161 *free_tail_p = mid;
2162 // At this point, mid->_next_om still refers to its current
2163 // value and another ObjectMonitor's _next_om field still
2164 // refers to this ObjectMonitor. Those linkages have to be
2165 // cleaned up by the caller who has the complete context.
2166 deflated = true;
2167 }
2168 return deflated;
2169 }
2170
2171 // Deflate the specified ObjectMonitor if not in-use using a JavaThread.
2172 // Returns true if it was deflated and false otherwise.
2173 //
2174 // The async deflation protocol sets owner to DEFLATER_MARKER and
2175 // makes ref_count negative as signals to contending threads that
2176 // an async deflation is in progress. There are a number of checks
2177 // as part of the protocol to make sure that the calling thread has
2178 // not lost the race to a contending thread or to a thread that just
2179 // wants to use the ObjectMonitor*.
2180 //
2181 // The ObjectMonitor has been successfully async deflated when:
2182 // (owner == DEFLATER_MARKER && ref_count < 0)
2183 // Contending threads or ObjectMonitor* using threads that see those
2184 // values know to retry their operation.
2185 //
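// A minimal sketch (illustrative only) of the three-part async deflation
// dance implemented below:
//
//   // Part 1: claim ownership with the sentinel value.
//   if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) != NULL) return false;
//   // Part 2: make ref_count negative to force racing threads to retry.
//   if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) != 0) { /* bail */ }
//   // Part 3: confirm no entering thread overwrote the sentinel.
//   if (mid->owner_is_DEFLATER_MARKER()) { /* successfully deflated */ }
//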
2186 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
2187 ObjectMonitor** free_head_p,
2188 ObjectMonitor** free_tail_p) {
2189 assert(AsyncDeflateIdleMonitors, "sanity check");
2190 assert(Thread::current()->is_Java_thread(), "precondition");
2191 // A newly allocated ObjectMonitor should not be seen here so we
2192 // avoid an endless inflate/deflate cycle.
2193 assert(mid->is_old(), "must be old: allocation_state=%d",
2194 (int) mid->allocation_state());
2195
2196 if (mid->is_busy() || mid->ref_count() != 0) {
2197 // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
2198 // is in use so no deflation.
2199 return false;
2200 }
2201
2202 if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) == NULL) {
2203 // ObjectMonitor is not owned by another thread. Our setting
2204 // owner to DEFLATER_MARKER forces any contending thread through
2205 // the slow path. This is just the first part of the async
2206 // deflation dance.
2207
2208 if (mid->_contentions != 0 || mid->_waiters != 0) {
2209 // Another thread has raced to enter the ObjectMonitor after
2210 // mid->is_busy() above or has already entered and waited on
2211 // it which makes it busy so no deflation. Restore owner to
2212 // NULL if it is still DEFLATER_MARKER.
2213 mid->try_set_owner_from(NULL, DEFLATER_MARKER);
2214 return false;
2215 }
2216
2217 if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) == 0) {
2218 // Make ref_count negative to force any contending threads or
2219 // ObjectMonitor* using threads to retry. This is the second
2220 // part of the async deflation dance.
2221
2222 if (mid->owner_is_DEFLATER_MARKER()) {
2223 // If owner is still DEFLATER_MARKER, then we have successfully
2224 // signaled any contending threads to retry. If it is not, then we
2225 // have lost the race to an entering thread and the ObjectMonitor
2226 // is now busy. This is the third and final part of the async
2227 // deflation dance.
2228 // Note: This owner check solves the ABA problem with ref_count
2229 // where another thread acquired the ObjectMonitor, finished
2230 // using it and restored the ref_count to zero.
2231
2232 // Sanity checks for the races:
2233 guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
2234 mid->_contentions);
2235 guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
2236 guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
2237 INTPTR_FORMAT, p2i(mid->_cxq));
2238 guarantee(mid->_EntryList == NULL,
2239 "must be no entering threads: EntryList=" INTPTR_FORMAT,
2240 p2i(mid->_EntryList));
2241
2242 const oop obj = (oop) mid->object();
2243 if (log_is_enabled(Trace, monitorinflation)) {
2244 ResourceMark rm;
2245 log_trace(monitorinflation)("deflate_monitor_using_JT: "
2246 "object=" INTPTR_FORMAT ", mark="
2247 INTPTR_FORMAT ", type='%s'",
2248 p2i(obj), obj->mark().value(),
2249 obj->klass()->external_name());
2250 }
2251
2252 // Install the old mark word if nobody else has already done it.
2253 mid->install_displaced_markword_in_object(obj);
2254 mid->clear_using_JT();
2255
2256 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2257 p2i(mid->object()));
2258 assert(mid->is_free(), "must be free: allocation_state=%d",
2259 (int) mid->allocation_state());
2260
2261 // Move the deflated ObjectMonitor to the working free list
2262 // defined by free_head_p and free_tail_p. No races on this list
2263 // so no need for load_acquire() or store_release().
2264 if (*free_head_p == NULL) {
2265 // First one on the list.
2266 *free_head_p = mid;
2267 }
2268 if (*free_tail_p != NULL) {
2269 // We append to the list so the caller can use mid->_next_om
2270 // to fix the linkages in its context.
2271 ObjectMonitor* prevtail = *free_tail_p;
2272 // Should have been cleaned up by the caller:
2273 om_lock(prevtail);
2274 assert(unmarked_next(prevtail) == NULL, "must be NULL: _next_om="
2275 INTPTR_FORMAT, p2i(unmarked_next(prevtail)));
2276 set_next(prevtail, mid); // prevtail now points to mid (and is unlocked)
2277 }
2278 *free_tail_p = mid;
2279
2280 // At this point, mid->_next_om still refers to its current
2281 // value and another ObjectMonitor's _next_om field still
2282 // refers to this ObjectMonitor. Those linkages have to be
2283 // cleaned up by the caller who has the complete context.
2284
2285 // We leave owner == DEFLATER_MARKER and ref_count < 0
2286 // to force any racing threads to retry.
2287 return true; // Success, ObjectMonitor has been deflated.
2288 }
2289
2290 // The owner was changed from DEFLATER_MARKER so we lost the
2291 // race since the ObjectMonitor is now busy.
2292
2293 // Add back max_jint to restore the ref_count field to its
2294 // proper value (which may not be what we saw above):
2295 Atomic::add(&mid->_ref_count, max_jint);
2296
2297 #ifdef ASSERT
2298 jint l_ref_count = mid->ref_count();
2299 #endif
2300 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
2301 l_ref_count, mid->ref_count());
2302 return false;
2303 }
2304
2305 // The ref_count was no longer 0 so we lost the race since the
2306     // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2307 // Restore owner to NULL if it is still DEFLATER_MARKER:
2308 mid->try_set_owner_from(NULL, DEFLATER_MARKER);
2309 }
2310
2311 // The owner field is no longer NULL so we lost the race since the
2312 // ObjectMonitor is now busy.
2313 return false;
2314 }
2315
2316 // Walk a given monitor list, and deflate idle monitors.
2317 // The given list could be a per-thread list or a global list.
2318 //
2319 // In the case of parallel processing of thread local monitor lists,
2320 // work is done by Threads::parallel_threads_do() which ensures that
2321 // each Java thread is processed by exactly one worker thread, and
2322 // thus avoids conflicts that would arise if worker threads were to
2323 // process the same monitor lists concurrently.
2324 //
2325 // See also ParallelSPCleanupTask and
2326 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2327 // Threads::parallel_java_threads_do() in thread.cpp.
2328 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
2329 int* count_p,
2330 ObjectMonitor** free_head_p,
2331 ObjectMonitor** free_tail_p) {
2332 ObjectMonitor* cur_mid_in_use = NULL;
2333 ObjectMonitor* mid = NULL;
2334 ObjectMonitor* next = NULL;
2335 int deflated_count = 0;
2336
2337 // We use the simpler lock-mid-as-we-go protocol since there are no
2338   // parallel list deletions while we are at a safepoint.
2339 if ((mid = get_list_head_locked(list_p)) == NULL) {
2340 return 0; // The list is empty so nothing to deflate.
2341 }
2342 next = unmarked_next(mid);
2343
2344 while (true) {
2345 oop obj = (oop) mid->object();
2346 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2347 // Deflation succeeded and already updated free_head_p and
2348 // free_tail_p as needed. Finish the move to the local free list
2349 // by unlinking mid from the global or per-thread in-use list.
2350 if (cur_mid_in_use == NULL) {
2351 // mid is the list head and it is locked. Switch the list head
2352 // to next which unlocks the list head, but leaves mid locked:
2353 Atomic::store(list_p, next);
2354 } else {
2355 // mid is locked. Switch cur_mid_in_use's next field to next
2356 // which is safe because we have no parallel list deletions,
2357 // but we leave mid locked:
2358 set_next(cur_mid_in_use, next);
2359 }
2360 // At this point mid is disconnected from the in-use list so
2361       // its lock no longer has any effect on the in-use list.
2362 deflated_count++;
2363 Atomic::dec(count_p);
2364 // mid is current tail in the free_head_p list so NULL terminate it
2365 // (which also unlocks it):
2366 set_next(mid, NULL);
2367 } else {
2368 om_unlock(mid);
2369 cur_mid_in_use = mid;
2370 }
2371 // All the list management is done so move on to the next one:
2372 mid = next;
2373 if (mid == NULL) {
2374 break; // Reached end of the list so nothing more to deflate.
2375 }
2376 // Lock mid so we can possibly deflate it:
2377 om_lock(mid);
2378 next = unmarked_next(mid);
2379 }
2380 return deflated_count;
2381 }
2382
2383 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2384 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2385 // list could be a per-thread in-use list or the global in-use list.
2386 // If a safepoint has started, then we save state via saved_mid_in_use_p
2387 // and return to the caller to honor the safepoint.
2388 //
2389 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2390 int* count_p,
2391 ObjectMonitor** free_head_p,
2392 ObjectMonitor** free_tail_p,
2393 ObjectMonitor** saved_mid_in_use_p) {
2394 assert(AsyncDeflateIdleMonitors, "sanity check");
2395 JavaThread* self = JavaThread::current();
2396
2397 ObjectMonitor* cur_mid_in_use = NULL;
2398 ObjectMonitor* mid = NULL;
2399 ObjectMonitor* next = NULL;
2400 ObjectMonitor* next_next = NULL;
2401 int deflated_count = 0;
2402
2403 // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
2404 // protocol because om_release() can do list deletions in parallel.
2405 // We also lock-next-next-as-we-go to prevent an om_flush() that is
2406 // behind this thread from passing us.
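  // A minimal sketch (illustrative only) of the locking invariant the loop
  // below maintains, where '*' marks a locked next field:
  //
  //   [cur_mid_in_use]* -> [mid]* -> [next]* -> next_next -> ...
  //
  // next is locked before mid is released, so an om_flush() walker behind
  // this thread can never pass it.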
2407 if (*saved_mid_in_use_p == NULL) {
2408 // No saved state so start at the beginning.
2409 // Lock the list head so we can possibly deflate it:
2410 if ((mid = get_list_head_locked(list_p)) == NULL) {
2411 return 0; // The list is empty so nothing to deflate.
2412 }
2413 next = unmarked_next(mid);
2414 } else {
2415 // We're restarting after a safepoint so restore the necessary state
2416 // before we resume.
2417 cur_mid_in_use = *saved_mid_in_use_p;
2418 // Lock cur_mid_in_use so we can possibly update its
2419 // next field to extract a deflated ObjectMonitor.
2420 om_lock(cur_mid_in_use);
2421 mid = unmarked_next(cur_mid_in_use);
2422 if (mid == NULL) {
2423 om_unlock(cur_mid_in_use);
2424 *saved_mid_in_use_p = NULL;
2425 return 0; // The remainder is empty so nothing more to deflate.
2426 }
2427 // Lock mid so we can possibly deflate it:
2428 om_lock(mid);
2429 next = unmarked_next(mid);
2430 }
2431
2432 while (true) {
2433     // The current mid's next field is locked at this point. If we have
2434     // a cur_mid_in_use, then its next field is also locked at this point.
2435
2436 if (next != NULL) {
2437 // We lock next so that an om_flush() thread that is behind us
2438 // cannot pass us when we unlock the current mid.
2439 om_lock(next);
2440 next_next = unmarked_next(next);
2441 }
2442
2443 // Only try to deflate if there is an associated Java object and if
2444 // mid is old (is not newly allocated and is not newly freed).
2445 if (mid->object() != NULL && mid->is_old() &&
2446 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2447 // Deflation succeeded and already updated free_head_p and
2448 // free_tail_p as needed. Finish the move to the local free list
2449 // by unlinking mid from the global or per-thread in-use list.
2450 if (cur_mid_in_use == NULL) {
2451 // mid is the list head and it is locked. Switch the list head
2452 // to next which is also locked (if not NULL) and also leave
2453 // mid locked:
2454 Atomic::store(list_p, next);
2455 } else {
2456 ObjectMonitor* locked_next = mark_om_ptr(next);
2457 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2458 // next field to locked_next and also leave mid locked:
2459 set_next(cur_mid_in_use, locked_next);
2460 }
2461       // At this point mid is disconnected from the in-use list so
2462       // its lock no longer has any effect on the in-use list.
2463 deflated_count++;
2464 Atomic::dec(count_p);
2465       // mid is the current tail of the free_head_p list so NULL-terminate
2466       // it (which also unlocks it):
2467 set_next(mid, NULL);
2468
2469 // All the list management is done so move on to the next one:
2470 mid = next; // mid keeps non-NULL next's locked next field
2471 next = next_next;
2472 } else {
2473 // mid is considered in-use if it does not have an associated
2474 // Java object or mid is not old or deflation did not succeed.
2475 // A mid->is_new() node can be seen here when it is freshly
2476 // returned by om_alloc() (and skips the deflation code path).
2477 // A mid->is_old() node can be seen here when deflation failed.
2478 // A mid->is_free() node can be seen here when a fresh node from
2479 // om_alloc() is released by om_release() due to losing the race
2480 // in inflate().
2481
2482 // All the list management is done so move on to the next one:
2483 if (cur_mid_in_use != NULL) {
2484 om_unlock(cur_mid_in_use);
2485 }
2486 // The next cur_mid_in_use keeps mid's lock state so
2487 // that it is stable for a possible next field change. It
2488 // cannot be modified by om_release() while it is locked.
2489 cur_mid_in_use = mid;
2490 mid = next; // mid keeps non-NULL next's locked state
2491 next = next_next;
2492
2493 if (SafepointMechanism::should_block(self) &&
2494 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
2495 // If a safepoint has started and cur_mid_in_use is not the list
2496 // head and is old, then it is safe to use as saved state. Return
2497 // to the caller before blocking.
2498 *saved_mid_in_use_p = cur_mid_in_use;
2499 om_unlock(cur_mid_in_use);
2500 if (mid != NULL) {
2501 om_unlock(mid);
2502 }
2503 return deflated_count;
2504 }
2505 }
2506 if (mid == NULL) {
2507 if (cur_mid_in_use != NULL) {
2508 om_unlock(cur_mid_in_use);
2509 }
2510 break; // Reached end of the list so nothing more to deflate.
2511 }
2512
2513 // The current mid's next field is locked at this point. If we have
2514 // a cur_mid_in_use, then it is also locked at this point.
2515 }
2516 // We finished the list without a safepoint starting so there's
2517 // no need to save state.
2518 *saved_mid_in_use_p = NULL;
2519 return deflated_count;
2520 }
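
// Sketch of the unlink step above when cur_mid_in_use != NULL ("(L)" means
// the node's next field is locked; the names match the function above):
//
//   before:  cur_mid_in_use(L) --> mid(L) --> next(L) --> next_next
//            set_next(cur_mid_in_use, mark_om_ptr(next));
//            set_next(mid, NULL);   // mid becomes the local free list tail
//   after:   cur_mid_in_use(L) --> next(L) --> next_next
//
// Keeping cur_mid_in_use, mid and next locked for the whole window is what
// prevents a parallel om_release() or om_flush() from unlinking or passing
// the nodes we are working on.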
2521
2522 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2523 counters->n_in_use = 0; // currently associated with objects
2524 counters->n_in_circulation = 0; // extant
2525 counters->n_scavenged = 0; // reclaimed (global and per-thread)
2526 counters->per_thread_scavenged = 0; // per-thread scavenge total
2527 counters->per_thread_times = 0.0; // per-thread scavenge times
2528 OrderAccess::storestore(); // flush inits for worker threads
2529 }
2530
2531 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
2532 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2533
2534 if (AsyncDeflateIdleMonitors) {
2535 // Nothing to do when global idle ObjectMonitors are deflated using
2536 // a JavaThread unless a special deflation has been requested.
2537 if (!is_special_deflation_requested()) {
2538 return;
2539 }
2540 }
2541
2542 bool deflated = false;
2543
2544 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2545 ObjectMonitor* free_tail_p = NULL;
2546 elapsedTimer timer;
2547
2548 if (log_is_enabled(Info, monitorinflation)) {
2549 timer.start();
2550 }
2551
2552 // Note: the thread-local monitors lists get deflated in
2553 // a separate pass. See deflate_thread_local_monitors().
2554
2555 // For moribund threads, scan LVars.in_use_list
2556 int deflated_count = 0;
2557 if (Atomic::load(&LVars.in_use_list) != NULL) {
2558 // Update n_in_circulation before LVars.in_use_count is updated by deflation.
2559 Atomic::add(&counters->n_in_circulation, Atomic::load(&LVars.in_use_count));
2560
2561 deflated_count = deflate_monitor_list(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p);
2562 Atomic::add(&counters->n_in_use, Atomic::load(&LVars.in_use_count));
2563 }
2564
2565 if (free_head_p != NULL) {
2566 // Move the deflated ObjectMonitors back to the global free list.
2567 // No races on the working free list so no need for load_acquire().
2568 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2569 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2570 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2571 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
2572 Atomic::add(&counters->n_scavenged, deflated_count);
2573 }
2574 timer.stop();
2575
2576 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2577 LogStreamHandle(Info, monitorinflation) lsh_info;
2578 LogStream* ls = NULL;
2579 if (log_is_enabled(Debug, monitorinflation)) {
2580 ls = &lsh_debug;
2581 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2582 ls = &lsh_info;
2583 }
2584 if (ls != NULL) {
2585 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2586 }
2587 }
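
// The logging above uses the "monitorinflation" tag; a typical way to see
// it is via Unified Logging (illustrative command line):
//
//   java -Xlog:monitorinflation=debug -Xlog:safepoint+cleanup=info ...
//
// At the Info level only passes that deflated something are reported; at
// the Debug level every pass is reported.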
2588
2589 class HandshakeForDeflation : public HandshakeClosure {
2590 public:
2591 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}
2592
2593 void do_thread(Thread* thread) {
2594 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
2595 INTPTR_FORMAT, p2i(thread));
2596 }
2597 };
2598
2599 void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
2600 assert(AsyncDeflateIdleMonitors, "sanity check");
2601
2602 // Deflate any global idle monitors.
2603 deflate_global_idle_monitors_using_JT();
2604
2605 int count = 0;
2606 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2607 if (jt->om_in_use_count > 0 && !jt->is_exiting()) {
2608 // This JavaThread is using ObjectMonitors so deflate any that
2609 // are idle unless this JavaThread is exiting; do not race with
2610 // ObjectSynchronizer::om_flush().
2611 deflate_per_thread_idle_monitors_using_JT(jt);
2612 count++;
2613 }
2614 }
2615 if (count > 0) {
2616 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
2617 }
2618
2619 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
2620 "global_free_count=%d, global_wait_count=%d",
2621 Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count),
2622 Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count));
2623
2624 // The ServiceThread's async deflation request has been processed.
2625 set_is_async_deflation_requested(false);
2626
2627 if (HandshakeAfterDeflateIdleMonitors && Atomic::load(&LVars.wait_count) > 0) {
2628 // There are deflated ObjectMonitors waiting for a handshake
2629 // (or a safepoint) for safety.
2630
2631 ObjectMonitor* list = Atomic::load(&LVars.wait_list);
2632 ADIM_guarantee(list != NULL, "LVars.wait_list must not be NULL");
2633 int count = Atomic::load(&LVars.wait_count);
2634 Atomic::store(&LVars.wait_count, 0);
2635 Atomic::store(&LVars.wait_list, (ObjectMonitor*)NULL);
2636
2637 // Find the tail for prepend_list_to_common(). No need to mark
2638 // ObjectMonitors for this list walk since only the deflater
2639 // thread manages the wait list.
2640 int l_count = 0;
2641 ObjectMonitor* tail = NULL;
2642 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
2643 tail = n;
2644 l_count++;
2645 }
2646 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);
2647
2648 // Will execute a safepoint if !ThreadLocalHandshakes:
2649 HandshakeForDeflation hfd_hc;
2650 Handshake::execute(&hfd_hc);
2651
2652 prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
2653
2654 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
2655 }
2656 }
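
// Sketch of why the handshake is needed before reusing the wait list
// entries (illustrative interleaving; "T1" is a JavaThread and "ST" is the
// deflater thread, typically the ServiceThread, running the code above):
//
//   T1: mon = mark.monitor();          // T1 still holds a reference to a
//                                      // just-deflated ObjectMonitor
//   ST: Handshake::execute(&hfd_hc);   // waits until T1 (and every other
//                                      // JavaThread) has crossed a poll
//   ST: prepend_list_to_common(...);   // only now can mon be re-allocated
//
// After the handshake no JavaThread can be acting on a stale reference to
// a deflated ObjectMonitor, so moving them to the free list is safe.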
2657
2658 // Deflate global idle ObjectMonitors using a JavaThread.
2659 //
2660 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
2661 assert(AsyncDeflateIdleMonitors, "sanity check");
2662 assert(Thread::current()->is_Java_thread(), "precondition");
2663 JavaThread* self = JavaThread::current();
2664
2665 deflate_common_idle_monitors_using_JT(true /* is_global */, self);
2666 }
2667
2668 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
2669 //
2670 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
2671 assert(AsyncDeflateIdleMonitors, "sanity check");
2672 assert(Thread::current()->is_Java_thread(), "precondition");
2673
2674 deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
2675 }
2676
2677 // Deflate global or per-thread idle ObjectMonitors using a JavaThread.
2678 //
2679 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
2680 JavaThread* self = JavaThread::current();
2681
2682 int deflated_count = 0;
2683 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors
2684 ObjectMonitor* free_tail_p = NULL;
2685 ObjectMonitor* saved_mid_in_use_p = NULL;
2686 elapsedTimer timer;
2687
2688 if (log_is_enabled(Info, monitorinflation)) {
2689 timer.start();
2690 }
2691
2692 if (is_global) {
2693 OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&LVars.in_use_count)));
2694 } else {
2695 OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
2696 }
2697
2698 do {
2699 int local_deflated_count;
2700 if (is_global) {
2701 local_deflated_count = deflate_monitor_list_using_JT(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2702 } else {
2703 local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
2704 }
2705 deflated_count += local_deflated_count;
2706
2707 if (free_head_p != NULL) {
2708 // Move the deflated ObjectMonitors to the global free list.
2709 // No races on the working list so no need for load_acquire().
2710 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
2711 // Note: The target thread can be doing an om_alloc() that
2712 // is trying to prepend an ObjectMonitor on its in-use list
2713 // at the same time that we have deflated the current in-use
2714 // list head and put it on the local free list. prepend_to_common()
2715 // will detect the race and retry which avoids list corruption,
2716 // but the next field in free_tail_p can flicker to marked
2717 // and then unmarked while prepend_to_common() is sorting it
2718 // all out.
2719 assert(unmarked_next(free_tail_p) == NULL, "must be NULL: _next_om="
2720 INTPTR_FORMAT, p2i(unmarked_next(free_tail_p)));
2721
2722 if (HandshakeAfterDeflateIdleMonitors) {
2723 prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
2724 } else {
2725 prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count);
2726 }
2727
2728 OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
2729 }
2730
2731 if (saved_mid_in_use_p != NULL) {
2732 // deflate_monitor_list_using_JT() detected a safepoint starting.
2733 timer.stop();
2734 {
2735 if (is_global) {
2736 log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
2737 } else {
2738 log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
2739 }
2740 assert(SafepointMechanism::should_block(self), "sanity check");
2741 ThreadBlockInVM blocker(self);
2742 }
2743 // Prepare for another loop after the safepoint.
2744 free_head_p = NULL;
2745 free_tail_p = NULL;
2746 if (log_is_enabled(Info, monitorinflation)) {
2747 timer.start();
2748 }
2749 }
2750 } while (saved_mid_in_use_p != NULL);
2751 timer.stop();
2752
2753 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2754 LogStreamHandle(Info, monitorinflation) lsh_info;
2755 LogStream* ls = NULL;
2756 if (log_is_enabled(Debug, monitorinflation)) {
2757 ls = &lsh_debug;
2758 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2759 ls = &lsh_info;
2760 }
2761 if (ls != NULL) {
2762 if (is_global) {
2763 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
2764 } else {
2765 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
2766 }
2767 }
2768 }
2769
2770 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
2771 // Report the cumulative time for deflating each thread's idle
2772 // monitors. Note: if the work is split among more than one
2773 // worker thread, then the reported time will likely be more
2774   // than a beginning-to-end measurement of the phase.
2775 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);
2776
2777 bool needs_special_deflation = is_special_deflation_requested();
2778 if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
2779 // Nothing to do when idle ObjectMonitors are deflated using
2780 // a JavaThread unless a special deflation has been requested.
2781 return;
2782 }
2783
2784 if (log_is_enabled(Debug, monitorinflation)) {
2785 // exit_globals()'s call to audit_and_print_stats() is done
2786 // at the Info level and not at a safepoint.
2787 // For async deflation, audit_and_print_stats() is called in
2788 // ObjectSynchronizer::do_safepoint_work() at the Debug level
2789 // at a safepoint.
2790 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
2791 } else if (log_is_enabled(Info, monitorinflation)) {
2792 log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
2793 "global_free_count=%d, global_wait_count=%d",
2794 Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count),
2795 Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count));
2796 }
2797
2798 Atomic::store(&_forceMonitorScavenge, 0); // Reset
2799
2800 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
2801 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
2802
2803 GVars.stw_random = os::random();
2804 GVars.stw_cycle++;
2805
2806 if (needs_special_deflation) {
2807 set_is_special_deflation_requested(false); // special deflation is done
2808 }
2809 }
2810
2811 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
2812 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
2813
2814 if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
2815 // Nothing to do if a special deflation has NOT been requested.
2816 return;
2817 }
2818
2819 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors
2820 ObjectMonitor* free_tail_p = NULL;
2821 elapsedTimer timer;
2822
2823 if (log_is_enabled(Info, safepoint, cleanup) ||
2824 log_is_enabled(Info, monitorinflation)) {
2825 timer.start();
2826 }
2827
2828 // Update n_in_circulation before om_in_use_count is updated by deflation.
2829 Atomic::add(&counters->n_in_circulation, thread->om_in_use_count);
2830
2831 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
2832 Atomic::add(&counters->n_in_use, thread->om_in_use_count);
2833
2834 if (free_head_p != NULL) {
2835 // Move the deflated ObjectMonitors back to the global free list.
2836 // No races on the working list so no need for load_acquire().
2837 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
2838 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
2839 INTPTR_FORMAT, p2i(free_tail_p->_next_om));
2840 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
2841 Atomic::add(&counters->n_scavenged, deflated_count);
2842 Atomic::add(&counters->per_thread_scavenged, deflated_count);
2843 }
2844
2845 timer.stop();
2846 // Safepoint logging cares about cumulative per_thread_times and
2847 // we'll capture most of the cost, but not the muxRelease() which
2848 // should be cheap.
2849 counters->per_thread_times += timer.seconds();
2850
2851 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2852 LogStreamHandle(Info, monitorinflation) lsh_info;
2853 LogStream* ls = NULL;
2854 if (log_is_enabled(Debug, monitorinflation)) {
2855 ls = &lsh_debug;
2856 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
2857 ls = &lsh_info;
2858 }
2859 if (ls != NULL) {
2860 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
2861 }
2862 }
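
// Call-site sketch (illustrative; see ParallelSPCleanupTask referenced at
// the top of deflate_monitor_list()): safepoint cleanup brackets the global
// and per-thread passes with the prepare/finish phases, roughly:
//
//   prepare_deflate_idle_monitors(&counters);
//   deflate_idle_monitors(&counters);              // global in-use list
//   for each JavaThread* jt:                       // possibly in parallel
//     deflate_thread_local_monitors(jt, &counters);
//   finish_deflate_idle_monitors(&counters);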
2863
2864 // Monitor cleanup on JavaThread::exit
2865
2866 // Iterate through monitor cache and attempt to release thread's monitors
2867 // Gives up on a particular monitor if an exception occurs, but continues
2868 // the overall iteration, swallowing the exception.
2869 class ReleaseJavaMonitorsClosure: public MonitorClosure {
2870  private:
2871   TRAPS;
2872
2873  public:
2874   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
2875   void do_monitor(ObjectMonitor* mid) {
2876     if (mid->owner() == THREAD) {
2877       (void)mid->complete_exit(CHECK);
2878     }
2879   }
2880 };
2881
2882 // Release all inflated monitors owned by THREAD. Lightweight monitors are
2883 // ignored. This is meant to be called during JNI thread detach which assumes
2884 // all remaining monitors are heavyweight. All exceptions are swallowed.
2885 // Scanning the extant monitor list can be time consuming.
2886 // A simple optimization is to add a per-thread flag that indicates a thread
2887 // called jni_monitorenter() during its lifetime.
2888 //
2889 // Instead of NoSafepointVerifier it might be cheaper to
2890 // use an idiom of the form:
2891 // auto int tmp = SafepointSynchronize::_safepoint_counter ;
2892 // <code that must not run at safepoint>
2893 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
2894 // Since the tests are extremely cheap we could leave them enabled
2895 // for normal product builds.
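//
// A more concrete form of that idiom (illustrative sketch; it assumes the
// safepoint counter is odd while a safepoint is in progress and that an
// accessor like SafepointSynchronize::safepoint_counter() is available):
//
//   uint64_t tmp = SafepointSynchronize::safepoint_counter();
//   <code that must not run at a safepoint>
//   guarantee(((tmp ^ SafepointSynchronize::safepoint_counter()) |
//              (tmp & 1)) == 0, "code ran during a safepoint");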
2896
2897 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
2898 assert(THREAD == JavaThread::current(), "must be current Java thread");
2899 NoSafepointVerifier nsv;
2900 ReleaseJavaMonitorsClosure rjmc(THREAD);
2901 ObjectSynchronizer::monitors_iterate(&rjmc);
2902 THREAD->clear_pending_exception();
2903 }
2904
2905 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
2906 switch (cause) {
2907 case inflate_cause_vm_internal: return "VM Internal";
2908 case inflate_cause_monitor_enter: return "Monitor Enter";
2909 case inflate_cause_wait: return "Monitor Wait";
2910 case inflate_cause_notify: return "Monitor Notify";
2911 case inflate_cause_hash_code: return "Monitor Hash Code";
2912 case inflate_cause_jni_enter: return "JNI Monitor Enter";
2913 case inflate_cause_jni_exit: return "JNI Monitor Exit";
2914 default:
2915 ShouldNotReachHere();
2916 }
2917 return "Unknown";
2918 }
2919
2920 //------------------------------------------------------------------------------
2921 // Debugging code
2935 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
2936 return (u_char*)&GVars.stw_random;
2937 }
2938
2939 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
2940 assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
2941
2942 LogStreamHandle(Debug, monitorinflation) lsh_debug;
2943 LogStreamHandle(Info, monitorinflation) lsh_info;
2944 LogStreamHandle(Trace, monitorinflation) lsh_trace;
2945 LogStream* ls = NULL;
2946 if (log_is_enabled(Trace, monitorinflation)) {
2947 ls = &lsh_trace;
2948 } else if (log_is_enabled(Debug, monitorinflation)) {
2949 ls = &lsh_debug;
2950 } else if (log_is_enabled(Info, monitorinflation)) {
2951 ls = &lsh_info;
2952 }
2953 assert(ls != NULL, "sanity check");
2954
2955 // Log counts for the global and per-thread monitor lists:
2956 int chk_om_population = log_monitor_list_counts(ls);
2957 int error_cnt = 0;
2958
2959 ls->print_cr("Checking global lists:");
2960
2961 // Check LVars.population:
2962 if (Atomic::load(&LVars.population) == chk_om_population) {
2963 ls->print_cr("global_population=%d equals chk_om_population=%d",
2964 Atomic::load(&LVars.population), chk_om_population);
2965 } else {
2966 // With lock free access to the monitor lists, it is possible for
2967 // log_monitor_list_counts() to return a value that doesn't match
2968     // LVars.population. So far a higher value has been seen in testing,
2969     // so something is being double counted by log_monitor_list_counts().
2970 ls->print_cr("WARNING: global_population=%d is not equal to "
2971 "chk_om_population=%d", Atomic::load(&LVars.population), chk_om_population);
2972 }
2973
2974 // Check LVars.in_use_list and LVars.in_use_count:
2975 chk_global_in_use_list_and_count(ls, &error_cnt);
2976
2977 // Check LVars.free_list and LVars.free_count:
2978 chk_global_free_list_and_count(ls, &error_cnt);
2979
2980 if (HandshakeAfterDeflateIdleMonitors) {
2981 // Check LVars.wait_list and LVars.wait_count:
2982 chk_global_wait_list_and_count(ls, &error_cnt);
2983 }
2984
2985 ls->print_cr("Checking per-thread lists:");
2986
2987 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
2988 // Check om_in_use_list and om_in_use_count:
2989 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);
2990
2991 // Check om_free_list and om_free_count:
2992 chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
2993 }
2994
2995 if (error_cnt == 0) {
2996 ls->print_cr("No errors found in monitor list checks.");
2997 } else {
2998 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
2999 }
3000
3001 if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
3002 (!on_exit && log_is_enabled(Trace, monitorinflation))) {
3003 // When exiting this log output is at the Info level. When called
3004 // at a safepoint, this log output is at the Trace level since
3005 // there can be a lot of it.
3006 log_in_use_monitor_details(ls);
3007 }
3008
3009 ls->flush();
3010
3011 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
3012 }
3013
3014 // Check a free monitor entry; log any errors.
3015 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
3016 outputStream * out, int *error_cnt_p) {
3017 stringStream ss;
3018 if (n->is_busy()) {
3019 if (jt != NULL) {
3020 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3021 ": free per-thread monitor must not be busy: %s", p2i(jt),
3022 p2i(n), n->is_busy_to_string(&ss));
3023 } else {
3024 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3025 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
3026 }
3027 *error_cnt_p = *error_cnt_p + 1;
3028 }
3029 if (n->header().value() != 0) {
3030 if (jt != NULL) {
3031 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3032 ": free per-thread monitor must have NULL _header "
3033 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
3034 n->header().value());
3035 *error_cnt_p = *error_cnt_p + 1;
3036 } else if (!AsyncDeflateIdleMonitors) {
3037 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3038 "must have NULL _header field: _header=" INTPTR_FORMAT,
3039 p2i(n), n->header().value());
3040 *error_cnt_p = *error_cnt_p + 1;
3041 }
3042 }
3043 if (n->object() != NULL) {
3044 if (jt != NULL) {
3045 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3046 ": free per-thread monitor must have NULL _object "
3047 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
3048 p2i(n->object()));
3049 } else {
3050 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
3051 "must have NULL _object field: _object=" INTPTR_FORMAT,
3052 p2i(n), p2i(n->object()));
3053 }
3054 *error_cnt_p = *error_cnt_p + 1;
3055 }
3056 }
3057
3058 // Lock the next ObjectMonitor for traversal. The current ObjectMonitor
3059 // is unlocked after the next ObjectMonitor is locked. *cur_p and *next_p
3060 // are updated to their next values in the list traversal. *cur_p is set
3061 // to NULL when the end of the list is reached.
3062 static void lock_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) {
3063 ObjectMonitor* prev = *cur_p; // Save current for unlocking.
3064 if (*next_p == NULL) { // Reached the end of the list.
3065 om_unlock(prev); // Unlock previous.
3066 *cur_p = NULL; // Tell the caller we are done.
3067 return;
3068 }
3069 om_lock(*next_p); // Lock next.
3070 om_unlock(prev); // Unlock previous.
3071 *cur_p = *next_p; // Update current.
3072 *next_p = unmarked_next(*cur_p); // Update next.
3073 }
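
// The checkers below all share the same traversal shape (illustrative
// sketch; visit() stands in for the per-entry check function):
//
//   ObjectMonitor* cur;
//   ObjectMonitor* next;
//   if ((cur = get_list_head_locked(&some_list)) != NULL) {
//     next = unmarked_next(cur);
//     while (true) {
//       visit(cur);                       // cur's next field is locked
//       lock_next_for_traversal(&cur, &next);
//       if (cur == NULL) {
//         break;                          // end of list; everything unlocked
//       }
//     }
//   }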
3074
3075 // Check the global free list and count; log the results of the checks.
3076 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
3077 int *error_cnt_p) {
3078 int chk_om_free_count = 0;
3079 ObjectMonitor* cur = NULL;
3080 ObjectMonitor* next = NULL;
3081 if ((cur = get_list_head_locked(&LVars.free_list)) != NULL) {
3082 next = unmarked_next(cur);
3083     // Locked the global free list head so process the list.
3084 while (true) {
3085 chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3086 chk_om_free_count++;
3087
3088 lock_next_for_traversal(&cur, &next);
3089 if (cur == NULL) {
3090 break;
3091 }
3092 }
3093 }
3094 if (Atomic::load(&LVars.free_count) == chk_om_free_count) {
3095 out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
3096 Atomic::load(&LVars.free_count), chk_om_free_count);
3097 } else {
3098 // With lock free access to LVars.free_list, it is possible for an
3099 // ObjectMonitor to be prepended to LVars.free_list after we started
3100 // calculating chk_om_free_count so LVars.free_count may not
3101 // match anymore.
3102 out->print_cr("WARNING: global_free_count=%d is not equal to "
3103 "chk_om_free_count=%d", Atomic::load(&LVars.free_count), chk_om_free_count);
3104 }
3105 }
3106
3107 // Check the global wait list and count; log the results of the checks.
3108 void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
3109 int *error_cnt_p) {
3110 int chk_om_wait_count = 0;
3111 ObjectMonitor* cur = NULL;
3112 ObjectMonitor* next = NULL;
3113 if ((cur = get_list_head_locked(&LVars.wait_list)) != NULL) {
3114 next = unmarked_next(cur);
3115     // Locked the global wait list head so process the list.
3116 while (true) {
3117       // Rules for LVars.wait_list are the same as for LVars.free_list:
3118 chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3119 chk_om_wait_count++;
3120
3121 lock_next_for_traversal(&cur, &next);
3122 if (cur == NULL) {
3123 break;
3124 }
3125 }
3126 }
3127 if (Atomic::load(&LVars.wait_count) == chk_om_wait_count) {
3128 out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
3129 Atomic::load(&LVars.wait_count), chk_om_wait_count);
3130 } else {
3131 out->print_cr("ERROR: global_wait_count=%d is not equal to "
3132 "chk_om_wait_count=%d", Atomic::load(&LVars.wait_count), chk_om_wait_count);
3133 *error_cnt_p = *error_cnt_p + 1;
3134 }
3135 }
3136
3137 // Check the global in-use list and count; log the results of the checks.
3138 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
3139 int *error_cnt_p) {
3140 int chk_om_in_use_count = 0;
3141 ObjectMonitor* cur = NULL;
3142 ObjectMonitor* next = NULL;
3143 if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
3144 next = unmarked_next(cur);
3145     // Locked the global in-use list head so process the list.
3146 while (true) {
3147 chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
3148 chk_om_in_use_count++;
3149
3150 lock_next_for_traversal(&cur, &next);
3151 if (cur == NULL) {
3152 break;
3153 }
3154 }
3155 }
3156 if (Atomic::load(&LVars.in_use_count) == chk_om_in_use_count) {
3157 out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
3158 Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
3159 } else {
3160 // With lock free access to the monitor lists, it is possible for
3161 // an exiting JavaThread to put its in-use ObjectMonitors on the
3162 // global in-use list after chk_om_in_use_count is calculated above.
3163 out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
3164 Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
3165 }
3166 }
3167
3168 // Check an in-use monitor entry; log any errors.
3169 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
3170 outputStream * out, int *error_cnt_p) {
3171 if (n->header().value() == 0) {
3172 if (jt != NULL) {
3173 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3174 ": in-use per-thread monitor must have non-NULL _header "
3175 "field.", p2i(jt), p2i(n));
3176 } else {
3177 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
3178 "must have non-NULL _header field.", p2i(n));
3179 }
3180 *error_cnt_p = *error_cnt_p + 1;
3181 }
3182 if (n->object() == NULL) {
3183 if (jt != NULL) {
3184 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3212 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
3213 ": in-use per-thread monitor's object does not refer "
3214 "to the same monitor: obj=" INTPTR_FORMAT ", mark="
3215 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
3216 p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3217 } else {
3218 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
3219 "monitor's object does not refer to the same monitor: obj="
3220 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
3221 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
3222 }
3223 *error_cnt_p = *error_cnt_p + 1;
3224 }
3225 }
3226
3227 // Check the thread's free list and count; log the results of the checks.
3228 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
3229 outputStream * out,
3230 int *error_cnt_p) {
3231 int chk_om_free_count = 0;
3232 ObjectMonitor* cur = NULL;
3233 ObjectMonitor* next = NULL;
3234 if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
3235 next = unmarked_next(cur);
3236     // Locked the per-thread free list head so process the list.
3237 while (true) {
3238 chk_free_entry(jt, cur, out, error_cnt_p);
3239 chk_om_free_count++;
3240
3241 lock_next_for_traversal(&cur, &next);
3242 if (cur == NULL) {
3243 break;
3244 }
3245 }
3246 }
3247 if (jt->om_free_count == chk_om_free_count) {
3248 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
3249 "chk_om_free_count=%d", p2i(jt), jt->om_free_count,
3250 chk_om_free_count);
3251 } else {
3252 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
3253 "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count,
3254 chk_om_free_count);
3255 *error_cnt_p = *error_cnt_p + 1;
3256 }
3257 }
3258
3259 // Check the thread's in-use list and count; log the results of the checks.
3260 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
3261 outputStream * out,
3262 int *error_cnt_p) {
3263 int chk_om_in_use_count = 0;
3264 ObjectMonitor* cur = NULL;
3265 ObjectMonitor* next = NULL;
3266 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
3267 next = unmarked_next(cur);
3268     // Locked the per-thread in-use list head so process the list.
3269 while (true) {
3270 chk_in_use_entry(jt, cur, out, error_cnt_p);
3271 chk_om_in_use_count++;
3272
3273 lock_next_for_traversal(&cur, &next);
3274 if (cur == NULL) {
3275 break;
3276 }
3277 }
3278 }
3279 if (jt->om_in_use_count == chk_om_in_use_count) {
3280 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
3281 "chk_om_in_use_count=%d", p2i(jt),
3282 jt->om_in_use_count, chk_om_in_use_count);
3283 } else {
3284 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
3285 "equal to chk_om_in_use_count=%d", p2i(jt),
3286 jt->om_in_use_count, chk_om_in_use_count);
3287 *error_cnt_p = *error_cnt_p + 1;
3288 }
3289 }
3290
3291 // Log details about ObjectMonitors on the in-use lists. The 'BHL'
3292 // flags indicate why the entry is in-use, 'object' and 'object type'
3293 // indicate the associated object and its type.
3294 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
3295 stringStream ss;
3296 if (Atomic::load(&LVars.in_use_count) > 0) {
3297 out->print_cr("In-use global monitor info:");
3298 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3299 out->print_cr("%18s %s %7s %18s %18s",
3300 "monitor", "BHL", "ref_cnt", "object", "object type");
3301 out->print_cr("================== === ======= ================== ==================");
3302 ObjectMonitor* cur = NULL;
3303 ObjectMonitor* next = NULL;
3304 if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
3305 next = unmarked_next(cur);
3306       // Locked the global in-use list head so process the list.
3307 while (true) {
3308 const oop obj = (oop) cur->object();
3309 const markWord mark = cur->header();
3310 ResourceMark rm;
3311 out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
3312 p2i(cur), cur->is_busy() != 0, mark.hash() != 0,
3313 cur->owner() != NULL, (int)cur->ref_count(), p2i(obj),
3314 obj->klass()->external_name());
3315 if (cur->is_busy() != 0) {
3316 out->print(" (%s)", cur->is_busy_to_string(&ss));
3317 ss.reset();
3318 }
3319 out->cr();
3320
3321 lock_next_for_traversal(&cur, &next);
3322 if (cur == NULL) {
3323 break;
3324 }
3325 }
3326 }
3327 }
3328
3329 out->print_cr("In-use per-thread monitor info:");
3330 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3331 out->print_cr("%18s %18s %s %7s %18s %18s",
3332 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
3333 out->print_cr("================== ================== === ======= ================== ==================");
3334 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3335 ObjectMonitor* cur = NULL;
3336 ObjectMonitor* next = NULL;
3337 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
3338 next = unmarked_next(cur);
3339         // Locked the per-thread in-use list head so process the list.
3340 while (true) {
3341 const oop obj = (oop) cur->object();
3342 const markWord mark = cur->header();
3343 ResourceMark rm;
3344 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
3345 INTPTR_FORMAT " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
3346 mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
3347 p2i(obj), obj->klass()->external_name());
3348 if (cur->is_busy() != 0) {
3349 out->print(" (%s)", cur->is_busy_to_string(&ss));
3350 ss.reset();
3351 }
3352 out->cr();
3353
3354 lock_next_for_traversal(&cur, &next);
3355 if (cur == NULL) {
3356 break;
3357 }
3358 }
3359 }
3360 }
3361
3362 out->flush();
3363 }
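
// Example of a single row from the global output above (illustrative
// values):
//
//   0x00007f30a4009c00 101       1 0x00000007c0a8f628 java.lang.Object
//
// reads as: the monitor is busy (B=1), has no cached hash code (H=0), is
// owned (L=1), has a ref_count of 1, and is inflated for a java.lang.Object.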
3364
3365 // Log counts for the global and per-thread monitor lists and return
3366 // the population count.
3367 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3368 int pop_count = 0;
3369 out->print_cr("%18s %10s %10s %10s %10s",
3370 "Global Lists:", "InUse", "Free", "Wait", "Total");
3371 out->print_cr("================== ========== ========== ========== ==========");
3372 out->print_cr("%18s %10d %10d %10d %10d", "", Atomic::load(&LVars.in_use_count),
3373 Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count), Atomic::load(&LVars.population));
3374 pop_count += Atomic::load(&LVars.in_use_count) + Atomic::load(&LVars.free_count);
3375 if (HandshakeAfterDeflateIdleMonitors) {
3376 pop_count += Atomic::load(&LVars.wait_count);
3377 }
3378
3379 out->print_cr("%18s %10s %10s %10s",
3380 "Per-Thread Lists:", "InUse", "Free", "Provision");
3381 out->print_cr("================== ========== ========== ==========");
3382
3383 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3384 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
3385 jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
3386 pop_count += jt->om_in_use_count + jt->om_free_count;
3387 }
3388 return pop_count;
3389 }
3390
3391 #ifndef PRODUCT
3392
3393 // Check if monitor belongs to the monitor cache
3394 // The list is grow-only so it's *relatively* safe to traverse
3395 // the list of extant blocks without taking a lock.
3396
3397 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
3398 PaddedObjectMonitor* block = Atomic::load(&g_block_list);
3399 while (block != NULL) {
3400 assert(block->object() == CHAINMARKER, "must be a block header");
3401 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
3402 address mon = (address)monitor;
3403 address blk = (address)block;
3404 size_t diff = mon - blk;
3405 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
3406 return 1;
3407 }
3408     // unmarked_next() is not needed with g_block_list (no locking is
3409     // used with the block linkage _next_om fields).
3410 block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
3411 }
3412 return 0;
3413 }
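
// Example of the containment check (illustrative numbers; the real
// _BLOCKSIZE and padding are platform dependent): with _BLOCKSIZE == 128
// and sizeof(PaddedObjectMonitor) == 256, a block spans
// [blk, blk + 128 * 256) and a monitor at blk + 3 * 256 passes both the
// range test and the (diff % sizeof(PaddedObjectMonitor)) == 0 alignment
// test, so it belongs to that block.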
3414
3415 #endif
|