/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list. For moribund threads, the monitors they
// inflated need to be scanned for deflation.
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;
bool ObjectSynchronizer::_gOmShouldDeflateIdleMonitors = false;
bool volatile ObjectSynchronizer::_is_cleanup_requested = false;

static volatile intptr_t gListLock = 0;     // protects global monitor lists
static volatile int gMonitorFreeCount = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0; // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
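//
// Illustration only: a minimal sketch of what such a fused primitive might
// look like, assuming quick_notify() below plus a hypothetical quick_exit()
// fast-path. Neither the name nor the fusion exists in this file; the JIT
// would emit a call to it in place of the notify()/monitorexit pair:
//
//   bool ObjectSynchronizer::quick_notify_and_exit(oopDesc* obj, Thread* self) {
//     if (!quick_notify(obj, self, false /* notify one waiter */)) {
//       return false;               // defer to the separate slow paths
//     }
//     return quick_exit(obj, self); // hypothetical exit fast-path
//   }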

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS (IllegalMonitorStateException)

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  while (true) {
    const markOop mark = obj->mark();

    if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * const m = omh.om_ptr();
      assert(oopDesc::equals((oop) m->object(), obj), "invariant");
      Thread * const owner = (Thread *) m->_owner;

      // Lock contention and Transactional Lock Elision (TLE) diagnostics
      // and observability
      // Case: light contention possibly amenable to TLE
      // Case: TLE inimical operations such as nested/recursive synchronization

      if (owner == Self) {
        m->_recursions++;
        return true;
      }

      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-NULL so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // Biased Locking in the object's header, the second check is for
      // stack-locking in the object's header, the third check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markOopDesc::unused_mark());

      if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
        assert(m->_recursions == 0, "invariant");
        assert(m->_owner == Self, "invariant");
        return true;
      }
    }
    break;
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update those copies
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, object, inflate_cause_vm_internal);
  omh.om_ptr()->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  bool do_loop = true;
  while (do_loop) {
    markOop mark = obj->mark();
    assert(!mark->has_bias_pattern(), "should not see bias pattern here");

    if (mark->is_neutral()) {
      // Anticipate successful CAS -- the ST of the displaced mark must
      // be visible <= the ST performed by the CAS.
      lock->set_displaced_header(mark);
      if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
        return;
      }
      // Fall through to inflate() ...
    } else if (mark->has_locker() &&
               THREAD->is_lock_owned((address)mark->locker())) {
      assert(lock != mark->locker(), "must not re-lock the same lock");
      assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
      lock->set_displaced_header(NULL);
      return;
    }

    // The object header will never be displaced to this lock,
    // so it does not matter what the value is, except that it
    // must be non-zero to avoid looking like a re-entrant lock,
    // and must not look locked either.
    lock->set_displaced_header(markOopDesc::unused_mark());
    ObjectMonitorHandle omh;
    inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
    do_loop = !omh.om_ptr()->enter(THREAD);
  }
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy
// weight monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}
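
// Illustration only (kept out of the build): a minimal standalone model of
// the stack-locking protocol that slow_enter()/fast_exit() implement above.
// The mark word is reduced to a plain tagged value; StackLock and the
// seq_cst atomics are stand-ins for BasicLock and HotSpot's Atomic wrappers.
//
//   #include <atomic>
//   #include <cstdint>
//
//   struct StackLock { uintptr_t displaced; };   // models BasicLock
//
//   // Lock: save the current (neutral) mark in the lock record, then CAS
//   // the header to point at the lock record. A false return means the
//   // object was not neutral or we lost a race -- take the slow path.
//   bool try_stack_lock(std::atomic<uintptr_t>& mark, StackLock* lk) {
//     uintptr_t neutral = mark.load();
//     lk->displaced = neutral;                   // ST before the CAS
//     return mark.compare_exchange_strong(neutral, (uintptr_t)lk);
//   }
//
//   // Unlock: swing the displaced mark back; failure means the monitor
//   // was inflated in the meantime and the slow path must run.
//   bool try_stack_unlock(std::atomic<uintptr_t>& mark, StackLock* lk) {
//     uintptr_t expected = (uintptr_t)lk;
//     return mark.compare_exchange_strong(expected, lk->displaced);
//   }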

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock,
// i.e. to give up an outer lock completely and then re-enter.
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// (A sketch of this protocol follows reenter() below.)
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  bool do_loop = true;
  while (do_loop) {
    ObjectMonitorHandle omh;
    inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
    do_loop = !omh.om_ptr()->reenter(recursion, THREAD);
  }
}
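
// Illustration only: one hypothetical shape of the five-step protocol above
// (not a call site from this file). h_lock1/h_lock2 are Handles to the two
// lock objects; how lock2 is entered/exited (steps 3 and 5) depends on the
// caller, so those steps are elided here.
//
//   // Holding lock1 (outer) and lock2 (inner), need to wait on lock2
//   // without deadlocking anyone who needs lock1:
//   intptr_t rec = ObjectSynchronizer::complete_exit(h_lock1, THREAD); // 1)
//   ObjectSynchronizer::wait(h_lock2, 0, THREAD);                      // 2)
//   ... exit lock2 when notified ...                                   // 3)
//   ObjectSynchronizer::reenter(h_lock1, rec, THREAD);                 // 4)
//   ... re-lock lock2 if needed ...                                    // 5)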

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  bool do_loop = true;
  while (do_loop) {
    ObjectMonitorHandle omh;
    inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
    do_loop = !omh.om_ptr()->enter(THREAD);
  }
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
  ObjectMonitor * monitor = omh.om_ptr();
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
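
// Illustration only: ObjectLocker is a scoped (RAII) guard, so a VM-internal
// call site simply brackets a region with a stack-allocated instance. A
// hypothetical example (h_obj and THREAD as usual):
//
//   {
//     ObjectLocker ol(h_obj, THREAD);   // fast_enter() in the constructor
//     ... mutate state guarded by h_obj's monitor ...
//   }                                   // fast_exit() in the destructor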

// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  ObjectMonitor * monitor = omh.om_ptr();

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  omh.om_ptr()->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().
        // When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  return value;
}
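
// Illustration only (kept out of the build): the default branch above is
// Marsaglia's xorshift128 generator. A minimal standalone model, with the
// four state words seeded by hand instead of from Thread's _hashStateX..W:
//
//   #include <cstdio>
//
//   struct HashState { unsigned x, y, z, w; };
//
//   unsigned next_hash(HashState* s) {
//     unsigned t = s->x;
//     t ^= (t << 11);
//     s->x = s->y; s->y = s->z; s->z = s->w;   // rotate the state words
//     s->w = (s->w ^ (s->w >> 19)) ^ (t ^ (t >> 8));
//     return s->w;
//   }
//
//   int main() {
//     HashState s = { 123456789u, 362436069u, 521288629u, 88675123u };
//     for (int i = 0; i < 4; i++) {
//       printf("0x%08x\n", next_hash(&s));
//     }
//     return 0;
//   }
//
// The seeds above are the example values from Marsaglia's paper; HotSpot
// seeds the per-thread state elsewhere (see Thread's initialization).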

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

 Retry:
  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for a fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    ObjectMonitorHandle omh;
    if (!omh.save_om_ptr(obj, mark)) {
      // Lost a race with async deflation so try again.
      assert(AsyncDeflateIdleMonitors, "sanity check");
      goto Retry;
    }
    monitor = omh.om_ptr();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp));
    hash = temp->hash();
    if (hash != 0) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp));
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash != 0) {                  // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY cases. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  ObjectMonitorHandle omh;
  inflate(&omh, Self, obj, inflate_cause_hash_code);
  monitor = omh.om_ptr();
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)mark));
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp));
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)test));
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markOop mark = ReadStableMark(obj);

    // Uncontended case, header points to stack
    if (mark->has_locker()) {
      return thread->is_lock_owned((address)mark->locker());
    }
    // Contended case, header points to ObjectMonitor (tagged pointer)
    if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
      return ret_code;
    }
    // Unlocked case, header in place
    assert(mark->is_neutral(), "sanity check");
    return false;
  }
}

// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other. Be aware that this method can revoke the bias of the lock
// object.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markOop mark = ReadStableMark(obj);

    // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
    if (mark->has_locker()) {
      return self->is_lock_owned((address)mark->locker()) ?
        owner_self : owner_other;
    }

    // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
    // The Object:ObjectMonitor relationship is stable as long as we're
    // not at a safepoint and AsyncDeflateIdleMonitors is false.
    if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * monitor = omh.om_ptr();
      void * owner = monitor->_owner;
      if (owner == NULL) return owner_none;
      return (owner == self ||
              self->is_lock_owned((address)owner)) ? owner_self : owner_other;
    }

    // CASE: neutral
    assert(mark->is_neutral(), "sanity check");
    return owner_none;           // it's unlocked
  }
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();

  while (true) {
    address owner = NULL;
    markOop mark = ReadStableMark(obj);

    // Uncontended case, header points to stack
    if (mark->has_locker()) {
      owner = (address) mark->locker();
    }

    // Contended case, header points to ObjectMonitor (tagged pointer)
    else if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      assert(monitor != NULL, "monitor should be non-null");
      owner = (address) monitor->owner();
    }

    if (owner != NULL) {
      // owning_thread_from_monitor_owner() may also return NULL here
      return Threads::owning_thread_from_monitor_owner(t_list, owner);
    }

    // Unlocked case, header in place
    // Cannot have assertion since this object may have been
    // locked by another thread when reaching here.
    // assert(mark->is_neutral(), "sanity check");

    return NULL;
  }
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      if (mid->is_active()) {
        ObjectMonitorHandle omh(mid);

        if (mid->object() == NULL ||
            (AsyncDeflateIdleMonitors && mid->_owner == DEFLATER_MARKER)) {
          // Only process with closure if the object is set.
          // For async deflation, race here if monitor is not owned!
          // The above ref_count bump (in the ObjectMonitorHandle ctor)
          // will cause subsequent async deflation to skip it.
          // However, previous or concurrent async deflation is a race.
          continue;
        }
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}
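
// Worked example of the heuristic above: with gMonitorPopulation = 200000
// and gMonitorFreeCount = 20000, usage is (180000 * 100) / 200000 = 90, so
// cleanup is requested once MonitorUsedDeflationThreshold is below 90. The
// 100LL widens the multiply to 64 bits so a large population cannot
// overflow a 32-bit int before the division.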

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object. The object is inflated and the mark refers
//      to the ObjectMonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and so has a lifespan longer than
    // that of this activation record. The VMThread will delete the op when
    // completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self,
                                           const InflateCause cause) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;

  if (AsyncDeflateIdleMonitors) {
    JavaThread * jt = (JavaThread *)Self;
    if (jt->omShouldDeflateIdleMonitors && jt->omInUseCount > 0 &&
        cause != inflate_cause_vm_internal) {
      // Deflate any per-thread idle monitors for this JavaThread if
      // this is not an internal inflation. Clean up your own mess.
      // (Gibbs Rule 45) Otherwise, skip this cleanup.
      // deflate_global_idle_monitors_using_JT() is called by the ServiceThread.
      debug_only(jt->check_for_valid_safepoint_state(false);)
      ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT();
    }
  }

  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      m->FreeNext = Self->omInUseList;
      Self->omInUseList = m;
      Self->omInUseCount++;
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc(1)");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        if (AsyncDeflateIdleMonitors) {
          take->set_owner(NULL);
          take->_count = 0;
        }
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        assert(take->is_free(), "invariant");
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // ObjectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list, while the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
    // (A fleshed-out sketch of that alternative follows omAlloc() below.)

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
      assert(temp[i].is_free(), "invariant");
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc(2)");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first ObjectMonitor in a block is reserved and dedicated.
    // It serves as block list "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store(&gBlockList, temp);

    // Add the new string of ObjectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}
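
// Illustration only: the comment in omAlloc() suggests replacing the
// "element [0] is the link" trick with an explicit block type. A minimal
// sketch of that alternative (hypothetical; _n would play _BLOCKSIZE's role):
//
//   struct Block {
//     Block*        _next;            // explicit block-list linkage
//     int           _n;               // number of monitors in _body
//     ObjectMonitor _body[1];         // _n monitors, allocated in one chunk
//   };
//
// With such a layout every element of _body[] is a usable monitor, the
// CHAINMARKER sentinel disappears, and block traversal follows _next
// instead of interpreting element [0] specially.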

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease() is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded numbers of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
// -- from reclaiming them while we are trying to release them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->header() == NULL, "invariant");
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  m->set_allocation_state(ObjectMonitor::Free);
  // Remove from omInUseList
  if (fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  guarantee(m->is_free(), "invariant");
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before_ the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the deflate_idle_monitors scavenge operator. In particular,
// this ensures that the thread's monitors are scanned by a GC safepoint,
// either via Thread::oops_do() (if a safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).
//
// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
// run at the same time as omFlush() so we have to be careful.
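
// Illustration only: the splices omFlush() performs below reduce to
// constant-time prepends of a null-terminated singly linked list onto a
// global list, done while holding the list lock. A standalone model
// (Node/global_head stand in for ObjectMonitor/gFreeList):
//
//   struct Node { Node* next; };
//
//   // Caller holds the lock protecting global_head (gListLock's role).
//   void splice(Node*& global_head, Node* local_head, Node* local_tail) {
//     if (local_head == nullptr) return;   // nothing to flush
//     local_tail->next = global_head;      // tail of local list -> old head
//     global_head = local_head;            // publish the whole local list
//   }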

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
    }
    guarantee(tail != NULL, "invariant");
    guarantee(Self->omFreeCount == tally, "free-count off");
    Self->omFreeList = NULL;
    Self->omFreeCount = 0;
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
      guarantee(cur_om->is_active(), "invariant");
    }
    guarantee(inUseTail != NULL, "invariant");
    guarantee(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseList = NULL;
    Self->omInUseCount = 0;
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((tally != 0 || inUseTally != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("omFlush: jt=" INTPTR_FORMAT ", free_monitor_tally=%d"
                 ", in_use_monitor_tally=%d" ", omFreeProvision=%d",
                 p2i(Self), tally, inUseTally, Self->omFreeProvision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle * omh_p, oop obj) {
  while (true) {
    markOop mark = obj->mark();
    if (mark->has_monitor()) {
      if (!omh_p->save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * monitor = omh_p->om_ptr();
      assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
      markOop dmw = monitor->header();
      assert(dmw->is_neutral(), "sanity check: header=" INTPTR_FORMAT, p2i((address)dmw));
      return;
    }
    inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
    return;
  }
}

void ObjectSynchronizer::inflate(ObjectMonitorHandle * omh_p, Thread * Self,
                                 oop object, const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this.

    // CASE: inflated
    if (mark->has_monitor()) {
      if (!omh_p->save_om_ptr(object, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * inf = omh_p->om_ptr();
      markOop dmw = inf->header();
      assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)dmw));
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().
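    //
    // As a sketch, the mark word transitions driven by this path are:
    //
    //   stack-locked  --CAS-->  INFLATING (0)  --release-store-->  encode(m)
    //
    // and only the thread that installed INFLATING may publish encode(m).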

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark->has_locker()) {
      ObjectMonitor * m;
      if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
        // If !AsyncDeflateIdleMonitors or if an internal inflation, then
        // we won't stop for a potential safepoint in omAlloc.
        m = omAlloc(Self, cause);
      } else {
        // If AsyncDeflateIdleMonitors and not an internal inflation, then
        // we may stop for a safepoint in omAlloc() so protect object.
        Handle h_obj(Self, object);
        m = omAlloc(Self, cause);
        object = h_obj();  // Refresh object.
      }
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible = NULL;
      m->_recursions = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;  // Consider: maintain by type/class

      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;  // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor. The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate(). The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress. This protocol avoids races that would
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(Self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     p2i(object->mark()), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      assert(!m->is_free(), "post-condition");
      omh_p->set_om_ptr(m);
      return;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m;
    if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
      // If !AsyncDeflateIdleMonitors or if an internal inflation, then
      // we won't stop for a potential safepoint in omAlloc.
      m = omAlloc(Self, cause);
    } else {
      // If AsyncDeflateIdleMonitors and not an internal inflation, then
      // we may stop for a safepoint in omAlloc() so protect object.
      Handle h_obj(Self, object);
      m = omAlloc(Self, cause);
      object = h_obj();  // Refresh object.
    }
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions = 0;
    m->_Responsible = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;  // consider: keep metastats by type/class

    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
      m->set_header(NULL);
      m->set_object(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(Self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   p2i(object->mark()), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    omh_p->set_om_ptr(m);
    return;
  }
}


// We create a list of in-use monitors for each thread.
//
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only the global list of in-use monitors,
// which is populated only as threads die (see omFlush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in-use could negatively impact performance. We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
  if (!AsyncDeflateIdleMonitors) {
    // Use the older mechanism for the global in-use list.
    ObjectSynchronizer::deflate_idle_monitors(_counters);
    return;
  }

  assert(_counters == NULL, "not used with AsyncDeflateIdleMonitors");

  log_debug(monitorinflation)("requesting deflation of idle monitors.");
  // Request deflation of global idle monitors by the ServiceThread:
  _gOmShouldDeflateIdleMonitors = true;
  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  Service_lock->notify_all();

  // Request deflation of per-thread idle monitors by each JavaThread:
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    if (jt->omInUseCount > 0) {
      // This JavaThread is using monitors so check it.
      jt->omShouldDeflateIdleMonitors = true;
    }
  }
}

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
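    //
    // As a sketch, deflation is just the inverse of the inflation
    // publish step:
    //
    //   obj->release_set_mark(mid->header());  // put the displaced header back
    //   mid->clear();                          // scrub the monitor for reuse
    //
    // after which mid can be appended to the caller's scavenged list.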
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'",
                                  p2i(obj), p2i(obj->mark()),
                                  obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");
    assert(mid->is_free(), "invariant");

    // Move the object to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Deflate the specified ObjectMonitor if not in-use using a JavaThread.
// Returns true if it was deflated and false otherwise.
//
// The async deflation protocol sets _owner to DEFLATER_MARKER and
// makes _count negative as a signal to contending threads that an
// async deflation is in progress. There are a number of checks as
// part of the protocol to make sure that the calling thread has
// not lost the race to a contending thread.
//
// The ObjectMonitor has been successfully async deflated when:
// (_owner == DEFLATER_MARKER && _count < 0). Contending threads that
// see those values know to retry their operation.
//
bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                  ObjectMonitor** freeHeadp,
                                                  ObjectMonitor** freeTailp) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  // A newly allocated ObjectMonitor should not be seen here so we
  // avoid an endless inflate/deflate cycle.
  assert(mid->is_old(), "precondition");

  if (mid->is_busy() || mid->ref_count() != 0) {
    // Easy checks are first - the ObjectMonitor is busy or the
    // ObjectMonitor* is in use, so no deflation.
    return false;
  }

  if (Atomic::cmpxchg(DEFLATER_MARKER, &mid->_owner, (void*)NULL) == NULL) {
    // ObjectMonitor is not owned by another thread. Our setting
    // _owner to DEFLATER_MARKER forces any contending thread through
    // the slow path. This is just the first part of the async
    // deflation dance.

    if (mid->_waiters != 0 || mid->ref_count() != 0) {
      // Another thread has raced to enter the ObjectMonitor after
      // mid->is_busy() above and has already waited on it which
      // makes it busy so no deflation. Or the ObjectMonitor* is
      // in use for some other operation like inflate(). Restore
      // _owner to NULL if it is still DEFLATER_MARKER.
      Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
      return false;
    }

    if (Atomic::cmpxchg(-max_jint, &mid->_count, (jint)0) == 0) {
      // Make _count negative to force racing threads to retry.
      // This is the second part of the async deflation dance.

      if (mid->_owner == DEFLATER_MARKER) {
        // If _owner is still DEFLATER_MARKER, then we have successfully
        // signaled any racing threads to retry. If it is not, then we
        // have lost the race to another thread and the ObjectMonitor is
        // now busy. This is the third and final part of the async
        // deflation dance.
        // Note: This _owner check solves the ABA problem with _count
        // where another thread acquired the ObjectMonitor, finished
        // using it and restored the _count to zero.

        // Sanity checks for the races:
        guarantee(mid->_waiters == 0, "should be no waiters");
        guarantee(mid->_cxq == NULL, "should be no contending threads");
        guarantee(mid->_EntryList == NULL, "should be no entering threads");

        if (log_is_enabled(Trace, monitorinflation)) {
          oop obj = (oop) mid->object();
          assert(obj != NULL, "sanity check");
          if (obj->is_instance()) {
            ResourceMark rm;
            log_trace(monitorinflation)("deflate_monitor_using_JT: "
                                        "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'",
                                        p2i(obj), p2i(obj->mark()),
                                        obj->klass()->external_name());
          }
        }

        // Install the old mark word if nobody else has already done it.
        mid->install_displaced_markword_in_object();
        mid->clear_using_JT();

        assert(mid->object() == NULL, "invariant");
        assert(mid->is_free(), "invariant");

        // Move the deflated ObjectMonitor to the working free list
        // defined by freeHeadp and freeTailp.
        if (*freeHeadp == NULL) {
          // First one on the list.
          *freeHeadp = mid;
        }
        if (*freeTailp != NULL) {
          // We append to the list so the caller can use mid->FreeNext
          // to fix the linkages in its context.
          ObjectMonitor * prevtail = *freeTailp;
          assert(prevtail->FreeNext == NULL, "not cleaned up by the caller");
          prevtail->FreeNext = mid;
        }
        *freeTailp = mid;

        // At this point, mid->FreeNext still refers to its current
        // value and another ObjectMonitor's FreeNext field still
        // refers to this ObjectMonitor. Those linkages have to be
        // cleaned up by the caller who has the complete context.

        // We leave _owner == DEFLATER_MARKER and _count < 0 to
        // force any racing threads to retry.
        return true;  // Success, ObjectMonitor has been deflated.
      }

      // The _owner was changed from DEFLATER_MARKER so we lost the
      // race since the ObjectMonitor is now busy. Add back max_jint
      // to restore the _count field to its proper value (which may
      // not be what we saw above).
      Atomic::add(max_jint, &mid->_count);

      assert(mid->_count >= 0, "_count should not be negative");
    }

    // The _count was no longer 0 so we lost the race since the
    // ObjectMonitor is now busy.
    assert(mid->_owner != DEFLATER_MARKER, "should no longer be set");
  }

  // The _owner field is no longer NULL so we lost the race since the
  // ObjectMonitor is now busy.
  return false;
}

// Walk a given monitor list and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock as needed.
//
// In the case of parallel processing of thread local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise if worker threads processed
// the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
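//
// The unlink performed below is the standard SLL extraction, sketched as:
//
//   if (mid == *listHeadp) *listHeadp = mid->FreeNext;    // head case
//   else cur_mid_in_use->FreeNext = mid->FreeNext;        // interior case
//   mid->FreeNext = NULL;   // mid becomes the tail of the scavenged list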
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor() succeeded,
      // extract mid from the in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}

// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
// a JavaThread. Returns the number of deflated ObjectMonitors. The given
// list could be a per-thread in-use list or the global in-use list.
// Caller acquires gListLock as appropriate. If a safepoint has started,
// then we save state via savedMidInUsep and return to the caller to
// honor the safepoint.
//
int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** listHeadp,
                                                      ObjectMonitor** freeHeadp,
                                                      ObjectMonitor** freeTailp,
                                                      ObjectMonitor** savedMidInUsep) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");

  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  if (*savedMidInUsep == NULL) {
    // No saved state so start at the beginning.
    mid = *listHeadp;
  } else {
    // We're restarting after a safepoint so restore the necessary state
    // before we resume.
    cur_mid_in_use = *savedMidInUsep;
    mid = cur_mid_in_use->FreeNext;
  }
  while (mid != NULL) {
    // Only try to deflate if there is an associated Java object and if
    // mid is old (is not newly allocated and is not newly freed).
    if (mid->object() != NULL && mid->is_old() &&
        deflate_monitor_using_JT(mid, freeHeadp, freeTailp)) {
      // Deflation succeeded so update the in-use list.
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        // Maintain the current in-use list.
        cur_mid_in_use->FreeNext = mid->FreeNext;
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;
      // At this point mid is disconnected from the in-use list
      // and is the current tail in the freeHeadp list.
      mid = next;
      deflated_count++;
    } else {
      // mid is considered in-use if it does not have an associated
      // Java object or mid is not old or deflation did not succeed.
      // A mid->is_new() node can be seen here when it is freshly returned
      // by omAlloc() (and skips the deflation code path).
      // A mid->is_old() node can be seen here when deflation failed.
      // A mid->is_free() node can be seen here when a fresh node from
      // omAlloc() is released by omRelease() due to losing the race
      // in inflate().
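      //
      // As a sketch, the allocation states seen on this path move:
      //
      //   omAlloc(): Free -> New;  first scan here: New -> Old;
      //   successful deflation: Old -> Free (deflate_monitor_using_JT()).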

      if (mid->object() != NULL && mid->is_new()) {
        // mid has an associated Java object and has now been seen
        // as newly allocated so mark it as "old".
        mid->set_allocation_state(ObjectMonitor::Old);
      }
      cur_mid_in_use = mid;
      mid = mid->FreeNext;

      if (SafepointSynchronize::is_synchronizing() &&
          cur_mid_in_use != *listHeadp && cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller so gListLock can be dropped as appropriate
        // before blocking.
        *savedMidInUsep = cur_mid_in_use;
        return deflated_count;
      }
    }
  }
  // We finished the list without a safepoint starting so there's
  // no need to save state.
  *savedMidInUsep = NULL;
  return deflated_count;
}

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->nInuse = 0;              // currently associated with objects
  counters->nInCirculation = 0;      // extant
  counters->nScavenged = 0;          // reclaimed (global and per-thread)
  counters->perThreadScavenged = 0;  // per-thread scavenge total
  counters->perThreadTimes = 0.0;    // per-thread scavenge times
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(!AsyncDeflateIdleMonitors, "sanity check");
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Prevent omFlush from changing mids in Thread dtor's during deflation
  // and in case the vm thread is acquiring a lock during a safepoint.
  // See e.g. 6320749
  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");

  // Note: the thread-local monitors lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan gOmInUseList
  int deflated_count = 0;
  if (gOmInUseList) {
    counters->nInCirculation += gOmInUseCount;
    deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
    gOmInUseCount -= deflated_count;
    counters->nScavenged += deflated_count;
    counters->nInuse += gOmInUseCount;
  }

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

// Deflate global idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread * cur_jt = JavaThread::current();

  _gOmShouldDeflateIdleMonitors = false;

  int deflated_count = 0;
  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor * freeTailp = NULL;
  ObjectMonitor * savedMidInUsep = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }
  Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
  OM_PERFDATA_OP(MonExtant, set_value(gOmInUseCount));

  do {
    int local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp, &savedMidInUsep);
    gOmInUseCount -= local_deflated_count;
    deflated_count += local_deflated_count;

    if (freeHeadp != NULL) {
      // Move the scavenged ObjectMonitors to the global free list.
      guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
      assert(freeTailp->FreeNext == NULL, "invariant");

      // Constant-time list splice - prepend scavenged segment to gFreeList.
      freeTailp->FreeNext = gFreeList;
      gFreeList = freeHeadp;

      gMonitorFreeCount += local_deflated_count;
      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
    }

    if (savedMidInUsep != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      Thread::muxRelease(&gListLock);
      timer.stop();
      {
        log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        assert(SafepointSynchronize::is_synchronizing(), "sanity check");
        ThreadBlockInVM blocker(cur_jt);
      }
      // Prepare for another loop after the safepoint.
      freeHeadp = NULL;
      freeTailp = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
      Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(2)");
    }
  } while (savedMidInUsep != NULL);
  Thread::muxRelease(&gListLock);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

// Deflate per-thread idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread * cur_jt = JavaThread::current();

  cur_jt->omShouldDeflateIdleMonitors = false;

  int deflated_count = 0;
  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor * freeTailp = NULL;
  ObjectMonitor * savedMidInUsep = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  OM_PERFDATA_OP(MonExtant, inc(cur_jt->omInUseCount));
  do {
    int local_deflated_count = deflate_monitor_list_using_JT(cur_jt->omInUseList_addr(), &freeHeadp, &freeTailp, &savedMidInUsep);
    cur_jt->omInUseCount -= local_deflated_count;
    deflated_count += local_deflated_count;

    if (freeHeadp != NULL) {
      // Move the scavenged ObjectMonitors to the global free list.
      Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT");
      guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
      assert(freeTailp->FreeNext == NULL, "invariant");

      // Constant-time list splice - prepend scavenged segment to gFreeList.
      freeTailp->FreeNext = gFreeList;
      gFreeList = freeHeadp;

      gMonitorFreeCount += local_deflated_count;
      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
      Thread::muxRelease(&gListLock);
      // Prepare for another loop on the current JavaThread.
      freeHeadp = NULL;
      freeTailp = NULL;
    }
    timer.stop();

    if (savedMidInUsep != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      {
        log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(cur_jt));
        assert(SafepointSynchronize::is_synchronizing(), "sanity check");
        ThreadBlockInVM blocker(cur_jt);
      }
      // Prepare for another loop on the current JavaThread after
      // the safepoint.
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
    }
  } while (savedMidInUsep != NULL);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(cur_jt), timer.seconds(), deflated_count);
  }
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
  // monitors at a safepoint when a special cleanup has been requested.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d",
                               counters->perThreadTimes, counters->perThreadScavenged);

  bool needs_special_cleanup = is_cleanup_requested();
  if (!AsyncDeflateIdleMonitors || needs_special_cleanup) {
    // AsyncDeflateIdleMonitors does not use these counters unless
    // there is a special cleanup request.

    gMonitorFreeCount += counters->nScavenged;

    OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
    OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
                               "gMonitorFreeCount=%d", gMonitorPopulation,
                               gOmInUseCount, gMonitorFreeCount);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0;  // Reset
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
  if (needs_special_cleanup) {
    set_is_cleanup_requested(false);  // special clean up is done
  }
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    // Nothing to do when idle ObjectMonitors are deflated using a
    // JavaThread unless a special cleanup has been requested.
    if (!is_cleanup_requested()) {
      return;
    }
  }

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors(1)");

  // Adjust counters
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
  counters->nScavenged += deflated_count;
  counters->nInuse += thread->omInUseCount;
  counters->perThreadScavenged += deflated_count;

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");

    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  timer.stop();
  if (log_is_enabled(Info, safepoint, cleanup)) {
    // Only safepoint logging cares about cumulative perThreadTimes
    // and we don't count this muxAcquire() that we have to do in
    // order to safely update perThreadTimes.
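    // (gListLock is used here because deflate_thread_local_monitors() may
    // run in several worker threads at once, making perThreadTimes a
    // shared accumulator; see finish_deflate_idle_monitors() above.)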
    Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors(2)");
    counters->perThreadTimes += timer.seconds();
    Thread::muxRelease(&gListLock);
  }

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's
// monitors. Gives up on a particular monitor if an exception occurs, but
// continues the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
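//
// (That guarantee presumably works because _safepoint_counter is bumped on
// both safepoint begin and end, so it is odd while a safepoint is in
// progress; equal and even snapshots imply no safepoint ran in between.)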

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  return (u_char*)&GVars.hcSequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  return (u_char*)&GVars.stwRandom;
}

void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream * ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  }

  // Log counts for the global and per-thread monitor lists:
  int chkMonitorPopulation = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check gMonitorPopulation:
  if (gMonitorPopulation == chkMonitorPopulation) {
    ls->print_cr("gMonitorPopulation=%d equals chkMonitorPopulation=%d",
                 gMonitorPopulation, chkMonitorPopulation);
  } else {
    ls->print_cr("ERROR: gMonitorPopulation=%d is not equal to "
                 "chkMonitorPopulation=%d", gMonitorPopulation,
                 chkMonitorPopulation);
    error_cnt++;
  }

  // Check gOmInUseList and gOmInUseCount:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check gFreeList and gMonitorFreeCount:
  chk_global_free_list_and_count(ls, &error_cnt);

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check omInUseList and omInUseCount:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check omFreeList and omFreeCount:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting, this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls, on_exit);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
                                        outputStream * out, int *error_cnt_p) {
  if ((!AsyncDeflateIdleMonitors && n->is_busy()) ||
      (AsyncDeflateIdleMonitors && n->is_busy_async())) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy.", p2i(jt),
                    p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->header()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), p2i(n->header()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chkMonitorFreeCount = 0;
  for (ObjectMonitor * n = gFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
    chkMonitorFreeCount++;
  }
  if (gMonitorFreeCount == chkMonitorFreeCount) {
    out->print_cr("gMonitorFreeCount=%d equals chkMonitorFreeCount=%d",
                  gMonitorFreeCount, chkMonitorFreeCount);
  } else {
    out->print_cr("ERROR: gMonitorFreeCount=%d is not equal to "
                  "chkMonitorFreeCount=%d", gMonitorFreeCount,
                  chkMonitorFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (gOmInUseCount == chkOmInUseCount) {
    out->print_cr("gOmInUseCount=%d equals chkOmInUseCount=%d", gOmInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: gOmInUseCount=%d is not equal to chkOmInUseCount=%d",
                  gOmInUseCount, chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = (oop)n->object();
  const markOop mark = obj->mark();
  if (!mark->has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i((address)obj),
                    p2i((address)mark));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i((address)obj), p2i((address)mark));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor * const obj_mon = mark->monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i((address)obj), p2i((address)mark),
                    p2i((address)obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i((address)obj),
                    p2i((address)mark), p2i((address)obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chkOmFreeCount = 0;
  for (ObjectMonitor * n = jt->omFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(jt, n, out, error_cnt_p);
    chkOmFreeCount++;
  }
  if (jt->omFreeCount == chkOmFreeCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals "
                  "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not "
                  "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount,
                  chkOmFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(jt, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (jt->omInUseCount == chkOmInUseCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals "
                  "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
                  "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use: 'B' -> is_busy, 'H' -> has a
// hashcode, 'L' -> locked (owner != NULL). 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
                                                    bool on_exit) {
  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  }

  if (gOmInUseCount > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hashcode, L -> lock status)");
    out->print_cr("%18s %s %7s %18s %18s",
                  "monitor", "BHL", "ref_cnt", "object", "object type");
    out->print_cr("================== === ======= ================== ==================");
    for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markOop mark = n->header();
      ResourceMark rm;
      out->print_cr(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
                    p2i(n), n->is_busy() != 0, mark->hash() != 0,
                    n->owner() != NULL, (int)n->ref_count(), p2i(obj),
                    obj->klass()->external_name());
    }
  }

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hashcode, L -> lock status)");
  out->print_cr("%18s %18s %s %7s %18s %18s",
                "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
  out->print_cr("================== ================== === ======= ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markOop mark = n->header();
      ResourceMark rm;
      out->print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
                    INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0,
                    mark->hash() != 0, n->owner() != NULL, (int)n->ref_count(),
                    p2i(obj), obj->klass()->external_name());
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int popCount = 0;
  out->print_cr("%18s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Total");
  out->print_cr("================== ========== ========== ==========");
  out->print_cr("%18s %10d %10d %10d", "",
                gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
  popCount += gOmInUseCount + gMonitorFreeCount;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision);
    popCount += jt->omInUseCount + jt->omFreeCount;
  }
  return popCount;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif
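
// Illustrative usage note: the monitorinflation logging consulted throughout
// this file is unified logging, enabled with, for example:
//
//   java -Xlog:monitorinflation=trace ...
//
// audit_and_print_stats() writes to the most verbose of trace/debug/info
// that is enabled.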