1 /* 2 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/vmSymbols.hpp" 27 #include "logging/log.hpp" 28 #include "logging/logStream.hpp" 29 #include "jfr/jfrEvents.hpp" 30 #include "memory/allocation.inline.hpp" 31 #include "memory/metaspaceShared.hpp" 32 #include "memory/padded.hpp" 33 #include "memory/resourceArea.hpp" 34 #include "memory/universe.hpp" 35 #include "oops/markWord.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "runtime/atomic.hpp" 38 #include "runtime/biasedLocking.hpp" 39 #include "runtime/handles.inline.hpp" 40 #include "runtime/interfaceSupport.inline.hpp" 41 #include "runtime/mutexLocker.hpp" 42 #include "runtime/objectMonitor.hpp" 43 #include "runtime/objectMonitor.inline.hpp" 44 #include "runtime/osThread.hpp" 45 #include "runtime/safepointVerifiers.hpp" 46 #include "runtime/sharedRuntime.hpp" 47 #include "runtime/stubRoutines.hpp" 48 #include "runtime/synchronizer.hpp" 49 #include "runtime/thread.inline.hpp" 50 #include "runtime/timer.hpp" 51 #include "runtime/vframe.hpp" 52 #include "runtime/vmThread.hpp" 53 #include "utilities/align.hpp" 54 #include "utilities/dtrace.hpp" 55 #include "utilities/events.hpp" 56 #include "utilities/preserveException.hpp" 57 58 // The "core" versions of monitor enter and exit reside in this file. 59 // The interpreter and compilers contain specialized transliterated 60 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), 61 // for instance. If you make changes here, make sure to modify the 62 // interpreter, and both C1 and C2 fast-path inline locking code emission. 63 // 64 // ----------------------------------------------------------------------------- 65 66 #ifdef DTRACE_ENABLED 67 68 // Only bother with this argument setup if dtrace is available 69 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. 
// Gathers the common DTrace probe arguments for a monitor event:
// the Java thread id plus the (possibly NULL-guarded) UTF-8 name of
// the object's klass. Expands into local declarations used by the
// HOTSPOT_MONITOR_* probe invocations below.
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

// Fires the monitor-wait probe (guarded by -XX:+DTraceMonitorProbes).
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

// Token-pasting aliases so DTRACE_MONITOR_PROBE(notify, ...) etc. can
// select the right HOTSPOT_MONITOR_* probe by name.
#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

// Fires the named monitor probe (notify / notifyAll / waited),
// guarded by -XX:+DTraceMonitorProbes.
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

// With DTrace compiled out these collapse to empty statements; the
// parameters are intentionally unused.
#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
// (hence the dummy int return; see ObjectSynchronizer::wait()).
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

// Striped locks used by read_stable_mark() to throttle threads that are
// spinning while some other thread finishes inflating a monitor.
#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* volatile ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

// Global ObjectMonitor free list. Newly allocated and deflated
// ObjectMonitors are prepended here.
static ObjectMonitor* volatile g_free_list = NULL;
// Global ObjectMonitor in-use list. When a JavaThread is exiting,
// ObjectMonitors on its per-thread in-use list are prepended here.
static ObjectMonitor* volatile g_om_in_use_list = NULL;

static volatile intptr_t gListLock = 0;     // protects global monitor lists
static volatile int g_om_free_count = 0;    // # on g_free_list
static volatile int g_om_in_use_count = 0;  // # on g_om_in_use_list
static volatile int g_om_population = 0;    // # Extant -- in circulation

// Sentinel oop value used to mark list chains; never a real object.
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
// Fast-path notify/notifyAll (all selects notifyAll semantics) invoked
// while the caller remains _thread_in_Java. Returns true if the notify
// was fully handled here; returns false to send the caller to the
// slow-path (invalid obj, not the owner, biased locking, etc.).
bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        // INotify() moves one waiter; loop drains the waitset when
        // all == true (notifyAll), otherwise runs exactly once.
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.
// Fast-path monitor enter attempted while the caller remains
// _thread_in_Java. Handles only the already-inflated cases (recursive
// re-enter by the owner, or an uncontended CAS on the owner field).
// Returns true on success; returns false to make the caller throw NPE
// (obj == NULL) or take the full slow-path enter.
bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  while (true) {
    const markWord mark = obj->mark();

    if (mark.has_monitor()) {
      // The ObjectMonitorHandle pins the monitor (ref_count) so it cannot
      // be async deflated out from under us while we examine it.
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* const m = omh.om_ptr();
      assert(oopDesc::equals((oop) m->object(), obj), "invariant");
      Thread* const owner = (Thread *) m->_owner;

      // Lock contention and Transactional Lock Elision (TLE) diagnostics
      // and observability
      // Case: light contention possibly amenable to TLE
      // Case: TLE inimical operations such as nested/recursive synchronization

      if (owner == self) {
        // Recursive re-enter by the current owner: just bump the count.
        m->_recursions++;
        return true;
      }

      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-NULL so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // Biased Locking in the object's header, the second check is for
      // stack-locking in the object's header, the third check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markWord::unused_mark());

      if (owner == NULL && Atomic::replace_if_null(self, &(m->_owner))) {
        // Uncontended acquire: we installed ourselves as owner.
        assert(m->_recursions == 0, "invariant");
        return true;
      }

      if (AsyncDeflateIdleMonitors &&
          Atomic::cmpxchg(self, &m->_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
        // The deflation protocol finished the first part (setting owner),
        // but it failed the second part (making ref_count negative) and
        // bailed. Or the ObjectMonitor was async deflated and reused.
        // Acquired the monitor.
        assert(m->_recursions == 0, "invariant");
        return true;
      }
    }
    break;
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint
  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following functions are changed. The implementation is
// extremely sensitive to race conditions. Be careful.
// Monitor enter entry point used by the interpreter/compiler runtime.
// Handles bias revocation (optionally rebias toward the caller) and then
// delegates to slow_enter() for the actual stack-lock / inflation work.
void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        // Successfully rebias-ed toward this thread: lock is held, done.
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

// Monitor exit: undoes a stack-lock when possible, otherwise inflates
// and exits the heavyweight monitor. `lock` is the BasicLock whose
// displaced header was set by the matching enter.
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, object, inflate_cause_vm_internal);
  omh.om_ptr()->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle interpreter/compiler slow case
// We don't need to use fast path here, because it must have been
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      // Stack-lock acquired: header now points at our BasicLock.
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    // Recursive stack-lock by this thread: record with a NULL
    // displaced header (the recursion marker checked by fast_exit()).
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
  omh.om_ptr()->enter(THREAD);
}

// This routine is used to handle interpreter/compiler slow case
// We don't need to use fast path here, because it must have
// failed in the interpreter/compiler code. Simply use the heavy
// weight monitor should be ok, unless someone find otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader  support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e.
// to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()

// Fully releases the (inflated) monitor regardless of recursion depth
// and returns the saved recursion state for a later reenter().
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
// `recursion` is the value previously returned by complete_exit().
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  omh.om_ptr()->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  // Mark the pending monitor as JNI-originated for the duration of the
  // enter; restored to true afterwards.
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
  omh.om_ptr()->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
  ObjectMonitor* monitor = omh.om_ptr();
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
// RAII wrapper: takes the monitor on `obj` in the constructor (when
// do_lock) and releases it in the destructor.
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state(false);
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
// Interruptible Object.wait(); throws IllegalArgumentException for a
// negative timeout. Returns the dummy value from dtrace_waited_probe()
// (see bug 6254741 note below).
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  ObjectMonitor* monitor = omh.om_ptr();

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

// Non-interruptible variant of wait(); otherwise identical.
void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  omh.om_ptr()->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    // Stack-locked by this thread: the implied waitset is empty, so
    // there is nobody to notify and no need to inflate.
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

// Returns the object's mark word once it is no longer in the transient
// INFLATING state, spinning/yielding/blocking as needed while some other
// thread completes the inflation.
static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

// Generates a fresh identity-hash value for obj. The algorithm is
// selected by the `hashCode` flag (0..4, else the Marsaglia xor-shift
// default); the result is masked to the mark word's hash field and
// never 0 / no_hash.
static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;          // 0 means "no hash"; substitute a marker
  assert(value != markWord::no_hash, "invariant");
  return value;
}

// Returns the identity hash for obj, installing a freshly generated one
// into the header (or inflated monitor's displaced header) if absent.
intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
748 assert(Universe::verify_in_progress() || DumpSharedSpaces || 749 !SafepointSynchronize::is_at_safepoint(), "invariant"); 750 assert(Universe::verify_in_progress() || DumpSharedSpaces || 751 self->is_Java_thread() , "invariant"); 752 assert(Universe::verify_in_progress() || DumpSharedSpaces || 753 ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant"); 754 755 while (true) { 756 ObjectMonitor* monitor = NULL; 757 markWord temp, test; 758 intptr_t hash; 759 markWord mark = read_stable_mark(obj); 760 761 // object should remain ineligible for biased locking 762 assert(!mark.has_bias_pattern(), "invariant"); 763 764 if (mark.is_neutral()) { 765 hash = mark.hash(); // this is a normal header 766 if (hash != 0) { // if it has hash, just return it 767 return hash; 768 } 769 hash = get_next_hash(self, obj); // allocate a new hash code 770 temp = mark.copy_set_hash(hash); // merge the hash code into header 771 // use (machine word version) atomic operation to install the hash 772 test = obj->cas_set_mark(temp, mark); 773 if (test == mark) { 774 return hash; 775 } 776 // If atomic operation failed, we must inflate the header 777 // into heavy weight monitor. We could add more code here 778 // for fast path, but it does not worth the complexity. 779 } else if (mark.has_monitor()) { 780 ObjectMonitorHandle omh; 781 if (!omh.save_om_ptr(obj, mark)) { 782 // Lost a race with async deflation so try again. 
783 assert(AsyncDeflateIdleMonitors, "sanity check"); 784 continue; 785 } 786 monitor = omh.om_ptr(); 787 temp = monitor->header(); 788 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); 789 hash = temp.hash(); 790 if (hash != 0) { 791 return hash; 792 } 793 // Skip to the following code to reduce code size 794 } else if (self->is_lock_owned((address)mark.locker())) { 795 temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned 796 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); 797 hash = temp.hash(); // by current thread, check if the displaced 798 if (hash != 0) { // header contains hash code 799 return hash; 800 } 801 // WARNING: 802 // The displaced header in the BasicLock on a thread's stack 803 // is strictly immutable. It CANNOT be changed in ANY cases. 804 // So we have to inflate the stack lock into an ObjectMonitor 805 // even if the current thread owns the lock. The BasicLock on 806 // a thread's stack can be asynchronously read by other threads 807 // during an inflate() call so any change to that stack memory 808 // may not propagate to other threads correctly. 809 } 810 811 // Inflate the monitor to set hash code 812 ObjectMonitorHandle omh; 813 inflate(&omh, self, obj, inflate_cause_hash_code); 814 monitor = omh.om_ptr(); 815 // Load displaced header and check it has hash code 816 mark = monitor->header(); 817 assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); 818 hash = mark.hash(); 819 if (hash == 0) { 820 hash = get_next_hash(self, obj); 821 temp = mark.copy_set_hash(hash); // merge hash code into header 822 assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value()); 823 uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value()); 824 test = markWord(v); 825 if (test != mark) { 826 // The only non-deflation update to the ObjectMonitor's 827 // header/dmw field is to merge in the hash code. 
If someone 828 // adds a new usage of the header/dmw field, please update 829 // this code. 830 // ObjectMonitor::install_displaced_markword_in_object() 831 // does mark the header/dmw field as part of async deflation, 832 // but that protocol cannot happen now due to the 833 // ObjectMonitorHandle above. 834 hash = test.hash(); 835 assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value()); 836 assert(hash != 0, "Trivial unexpected object/monitor header usage."); 837 } 838 } 839 // We finally get the hash 840 return hash; 841 } 842 } 843 844 // Deprecated -- use FastHashCode() instead. 845 846 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { 847 return FastHashCode(Thread::current(), obj()); 848 } 849 850 851 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, 852 Handle h_obj) { 853 if (UseBiasedLocking) { 854 BiasedLocking::revoke_and_rebias(h_obj, false, thread); 855 assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now"); 856 } 857 858 assert(thread == JavaThread::current(), "Can only be called on current thread"); 859 oop obj = h_obj(); 860 861 while (true) { 862 markWord mark = read_stable_mark(obj); 863 864 // Uncontended case, header points to stack 865 if (mark.has_locker()) { 866 return thread->is_lock_owned((address)mark.locker()); 867 } 868 // Contended case, header points to ObjectMonitor (tagged pointer) 869 if (mark.has_monitor()) { 870 ObjectMonitorHandle omh; 871 if (!omh.save_om_ptr(obj, mark)) { 872 // Lost a race with async deflation so try again. 873 assert(AsyncDeflateIdleMonitors, "sanity check"); 874 continue; 875 } 876 bool ret_code = omh.om_ptr()->is_entered(thread) != 0; 877 return ret_code; 878 } 879 // Unlocked case, header in place 880 assert(mark.is_neutral(), "sanity check"); 881 return false; 882 } 883 } 884 885 // Be aware of this method could revoke bias of the lock object. 886 // This method queries the ownership of the lock handle specified by 'h_obj'. 
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  // Retry loop: restart if an inflated monitor is being async deflated
  // out from under us.
  while (true) {
    markWord mark = read_stable_mark(obj);

    // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
    if (mark.has_locker()) {
      return self->is_lock_owned((address)mark.locker()) ?
        owner_self : owner_other;
    }

    // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
    // The Object:ObjectMonitor relationship is stable as long as we're
    // not at a safepoint and AsyncDeflateIdleMonitors is false.
    if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      void* owner = monitor->_owner;
      if (owner == NULL) return owner_none;
      // The owner field may hold either a Thread* or, for a monitor
      // inflated over a stack-lock, a BasicLock address on our stack.
      return (owner == self ||
              self->is_lock_owned((address)owner)) ? owner_self : owner_other;
    }

    // CASE: neutral
    assert(mark.is_neutral(), "sanity check");
    return owner_none;           // it's unlocked
  }
}

// Returns the JavaThread that owns the lock on h_obj, or NULL if it is
// unowned (or if the owner cannot be resolved from t_list). May revoke
// a bias. FIXME: jvmti should call this.
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();

  while (true) {
    address owner = NULL;
    markWord mark = read_stable_mark(obj);

    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      owner = (address) mark.locker();
    }

    // Contended case, header points to ObjectMonitor (tagged pointer)
    else if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      assert(monitor != NULL, "monitor should be non-null");
      owner = (address) monitor->owner();
    }

    if (owner != NULL) {
      // owning_thread_from_monitor_owner() may also return NULL here
      return Threads::owning_thread_from_monitor_owner(t_list, owner);
    }

    // Unlocked case, header in place
    // Cannot have assertion since this object may have been
    // locked by another thread when reaching here.
    // assert(mark.is_neutral(), "sanity check");

    return NULL;
  }
}

// Visitors ...
// Iterate over every extant monitor block and invoke 'closure' on each
// active ObjectMonitor that is still associated with an object and is
// not in the middle of being async deflated.
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  // load_acquire pairs with the release_store in om_alloc() that
  // publishes newly formatted blocks.
  PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    // Element [0] of each block is reserved for list linkage, so start at 1.
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      if (mid->is_active()) {
        ObjectMonitorHandle omh(mid);

        if (mid->object() == NULL ||
            (AsyncDeflateIdleMonitors && mid->ref_count() < 0)) {
          // Only process with closure if the object is set.
          // For async deflation, race here if monitor is not owned!
          // The above ref_count bump (in ObjectMonitorHandle ctr)
          // will cause subsequent async deflation to skip it.
          // However, previous or concurrent async deflation is a race
          // so skip this ObjectMonitor if it is being async deflated.
          continue;
        }
        closure->do_monitor(mid);
      }
    }
    block = (PaddedObjectMonitor*)block->_next_om;
  }
}

// Returns true if more than MonitorUsedDeflationThreshold percent of
// the extant monitor population is currently in use.
static bool monitors_used_above_threshold() {
  if (g_om_population == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = g_om_population - g_om_free_count;
    // The 100LL forces a 64-bit multiply so the intermediate product
    // cannot overflow an int.
    int monitor_usage = (monitors_used * 100LL) / g_om_population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}

// Returns true if MonitorBound is set (> 0) and if the specified
// cnt is > MonitorBound. Otherwise returns false.
static bool is_MonitorBound_exceeded(const int cnt) {
  const int mx = MonitorBound;
  return mx > 0 && cnt > mx;
}

// Policy check used by the ServiceThread: should an async deflation
// pass be started now? Always false unless AsyncDeflateIdleMonitors.
bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
  if (is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
    // Not enough ObjectMonitors on the global free list.
    return true;
  }
  return false;
}

// Policy check used at safepoint cleanup time: should idle monitors be
// deflated at this safepoint?
bool ObjectSynchronizer::is_safepoint_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    if (monitors_used_above_threshold()) {
      // Too many monitors in use.
      return true;
    }
    return false;
  }
  if (is_special_deflation_requested()) {
    // For AsyncDeflateIdleMonitors only do a safepoint deflation
    // if there is a special deflation request.
    return true;
  }
  return false;
}

// Milliseconds elapsed since the last async deflation pass was started.
jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(g_om_in_use_list, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

// Apply closure 'f' to the object oop of every monitor on 'list' that
// still has an associated object.
void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to update the ObjectMonitor's ref_count for this
  // ObjectMonitor* use.
  for (ObjectMonitor* mid = list; mid != NULL; mid = mid->_next_om) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global g_free_list and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the g_free_list.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// -- unassigned and on the global free list
// -- unassigned and on a thread's private om_free_list
// -- assigned to an object. The object is inflated and the mark refers
//    to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
//
// When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//
// When safepoint deflation is being used and MonitorBound is set, the
// boundary applies to
//   (g_om_population - g_om_free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.
//
// When async deflation is being used:
// The monitor pool is still grow-only. Async deflation is requested
// by a safepoint's cleanup phase or by the ServiceThread at periodic
// intervals when is_async_deflation_needed() returns true. In
// addition to other policies that are checked, if there are not
// enough ObjectMonitors on the global free list, then
// is_async_deflation_needed() will return true. The ServiceThread
// calls deflate_global_idle_monitors_using_JT() and also sets the
// per-thread om_request_deflation flag as needed.

// Request a STW safepoint-based scavenge of idle monitors (safepoint
// deflation mode only). The ForceMonitorScavenge xchg ensures that at
// most one thread enqueues the VM op per scavenge cycle.
static void InduceScavenge(Thread* self, const char * Whence) {
  assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");

  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMthread and have a lifespan longer than
    // that of this activation record. The VMThread will delete the op when
    // completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

// Allocate an ObjectMonitor for 'self', trying in order: the thread's
// local om_free_list, a bulk refill from the global g_free_list, and
// finally a freshly malloc'ed block of _BLOCKSIZE monitors. The
// returned monitor is marked New and is already linked onto the
// thread's om_in_use_list.
ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self,
                                            const InflateCause cause) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;

  if (AsyncDeflateIdleMonitors) {
    JavaThread* jt = (JavaThread *)self;
    if (jt->om_request_deflation && jt->om_in_use_count > 0 &&
        cause != inflate_cause_vm_internal) {
      // Deflate any per-thread idle monitors for this JavaThread if
      // this is not an internal inflation; internal inflations can
      // occur in places where it is not safe to pause for a safepoint.
      // Clean up your own mess (Gibbs Rule 45). Otherwise, skip this
      // deflation. deflate_global_idle_monitors_using_JT() is called
      // by the ServiceThread. Per-thread async deflation is triggered
      // by the ServiceThread via om_request_deflation.
      debug_only(jt->check_for_valid_safepoint_state(false);)
      ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(jt);
    }
  }

  stringStream ss;
  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = self->om_free_list;
    if (m != NULL) {
      self->om_free_list = m->_next_om;
      self->om_free_count--;
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      m->_next_om = self->om_in_use_list;
      self->om_in_use_list = m;
      self->om_in_use_count++;
      return m;
    }

    // 2: try to allocate from the global g_free_list
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (g_free_list != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "om_alloc(1)");
      for (int i = self->om_free_provision; --i >= 0 && g_free_list != NULL;) {
        g_om_free_count--;
        ObjectMonitor* take = g_free_list;
        g_free_list = take->_next_om;
        guarantee(take->object() == NULL, "invariant");
        if (AsyncDeflateIdleMonitors) {
          // We allowed 3 field values to linger during async deflation.
          // We clear header and restore ref_count here, but we leave
          // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
          // enter optimization can no longer race with async deflation
          // and reuse.
          take->set_header(markWord::zero());
          if (take->ref_count() < 0) {
            // Add back max_jint to restore the ref_count field to its
            // proper value.
            Atomic::add(max_jint, &take->_ref_count);

            assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
                   take->ref_count());
          }
        }
        take->Recycle();
        assert(take->is_free(), "invariant");
        om_release(self, take, false);
      }
      Thread::muxRelease(&gListLock);
      // Grow the refill quantum geometrically, capped at MAXPRIVATE.
      self->om_free_provision += 1 + (self->om_free_provision/2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

      if (!AsyncDeflateIdleMonitors &&
          is_MonitorBound_exceeded(g_om_population - g_om_free_count)) {
        // Not enough ObjectMonitors on the global free list.
        // We can't safely induce a STW safepoint from om_alloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(self, "om_alloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
    PaddedObjectMonitor* temp;
    size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void*)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                     mtInternal);
    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // ObjectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // initialize the linked list, each monitor points to its next
    // forming the single linked free list, the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as g_block_list
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i]._next_om = (ObjectMonitor *)&temp[i+1];
      assert(temp[i].is_free(), "invariant");
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1]._next_om = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate g_block_list and g_free_list.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "om_alloc(2)");
    g_om_population += _BLOCKSIZE-1;
    g_om_free_count += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (g_block_list).
    // The very first ObjectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0]._next_om = g_block_list;
    // There are lock-free uses of g_block_list so make sure that
    // the previous stores happen before we update g_block_list.
    OrderAccess::release_store(&g_block_list, temp);

    // Add the new string of ObjectMonitors to the global free list
    temp[_BLOCKSIZE - 1]._next_om = g_free_list;
    g_free_list = temp + 1;
    Thread::muxRelease(&gListLock);
    // Loop around: the next iteration will refill from g_free_list.
  }
}

// Place "m" on the caller's private per-thread om_free_list.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's om_free_list as the only non-allocation time
// we'll call om_release() is to return a monitor to the free list after
// a CAS attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
// -- from reclaiming them while we are trying to release them.

// Return monitor "m" to the caller's private per-thread om_free_list,
// first unlinking it from the per-thread in-use list when
// from_per_thread_alloc is true. The monitor must be fully quiescent:
// header cleared, no associated object, not busy, no recursions.
void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                    bool from_per_thread_alloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  stringStream ss;
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
            "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
            m->_recursions);
  m->set_allocation_state(ObjectMonitor::Free);
  // _next_om is used for both per-thread in-use and free lists so
  // we have to remove 'm' from the in-use list first (as needed).
  if (from_per_thread_alloc) {
    // Need to remove 'm' from om_in_use_list.
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = self->om_in_use_list; mid != NULL; cur_mid_in_use = mid, mid = mid->_next_om) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == self->om_in_use_list) {
          self->om_in_use_list = mid->_next_om;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->_next_om = mid->_next_om; // maintain the current thread in-use list
        }
        extracted = true;
        self->om_in_use_count--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // Push onto the front of the per-thread free list.
  m->_next_om = self->om_free_list;
  guarantee(m->is_free(), "invariant");
  self->om_free_list = m;
  self->om_free_count++;
}

// Return ObjectMonitors on a moribund thread's free and in-use
// lists to the appropriate global lists. The ObjectMonitors on the
// per-thread in-use list may still be in use by other threads.
//
// We currently call om_flush() from Threads::remove() before the
// thread has been excised from the thread list and is no longer a
// mutator. This means that om_flush() cannot run concurrently with
// a safepoint and interleave with deflate_idle_monitors(). In
// particular, this ensures that the thread's in-use monitors are
// scanned by a GC safepoint, either via Thread::oops_do() (before
// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
// om_flush() is called).
//
// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
// run at the same time as om_flush() so we have to be careful.

void ObjectSynchronizer::om_flush(Thread* self) {
  // Pass 1: detach the per-thread in-use list, counting its length and
  // finding its tail so it can be spliced onto the global in-use list.
  int in_use_count = 0;
  ObjectMonitor* in_use_list = self->om_in_use_list;
  ObjectMonitor* in_use_tail = NULL;
  if (in_use_list != NULL) {
    // The thread is going away, however the ObjectMonitors on the
    // om_in_use_list may still be in-use by other threads. Link
    // them to in_use_tail, which will be linked into the global
    // in-use list g_om_in_use_list below, under the gListLock.
    for (ObjectMonitor* cur_om = in_use_list; cur_om != NULL; cur_om = cur_om->_next_om) {
      in_use_tail = cur_om;
      in_use_count++;
      ADIM_guarantee(cur_om->is_active(), "invariant");
    }
    guarantee(in_use_tail != NULL, "invariant");
    ADIM_guarantee(self->om_in_use_count == in_use_count, "in-use count off");
    self->om_in_use_list = NULL;
    self->om_in_use_count = 0;
  }

  // Pass 2: detach the per-thread free list the same way.
  int free_count = 0;
  ObjectMonitor* free_list = self->om_free_list;
  ObjectMonitor* free_tail = NULL;
  if (free_list != NULL) {
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to g_free_list below under the gListLock.
    stringStream ss;
    for (ObjectMonitor* s = free_list; s != NULL; s = s->_next_om) {
      free_count++;
      free_tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(free_tail != NULL, "invariant");
    ADIM_guarantee(self->om_free_count == free_count, "free-count off");
    self->om_free_list = NULL;
    self->om_free_count = 0;
  }

  // Splice both detached lists onto the global lists under gListLock.
  Thread::muxAcquire(&gListLock, "om_flush");
  if (free_tail != NULL) {
    free_tail->_next_om = g_free_list;
    g_free_list = free_list;
    g_om_free_count += free_count;
  }

  if (in_use_tail != NULL) {
    in_use_tail->_next_om = g_om_in_use_list;
    g_om_in_use_list = in_use_list;
    g_om_in_use_count += in_use_count;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
                 ", in_use_count=%d" ", om_free_provision=%d",
                 p2i(self), free_count, in_use_count, self->om_free_provision);
  }
}

// Post a JFR JavaMonitorInflate event recording which object was
// inflated and why. Caller has already checked should_commit().
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
  while (true) {
    markWord mark = obj->mark();
    if (mark.has_monitor()) {
      // Already inflated; publish a saved ObjectMonitor* via the handle.
      if (!omh_p->save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh_p->om_ptr();
      assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
      markWord dmw = monitor->header();
      assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
      return;
    }
    inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
    return;
  }
}

// Inflate the lock on 'object' into an ObjectMonitor and publish a saved
// ObjectMonitor* via 'omh_p'. 'cause' records why inflation happened (for
// JFR events and for the om_alloc() safepoint-protection decision below).
// Loops until the object's mark word is in the inflated state.
void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
                                 oop object, const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark.has_monitor()) {
      if (!omh_p->save_om_ptr(object, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* inf = omh_p->om_ptr();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for
    // inflation to finish.
    // We could always eliminate polling by parking the thread on some
    // auxiliary list.
    if (mark == markWord::INFLATING()) {
      read_stable_mark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_
    // attempting to install INFLATING into the mark word. We originally
    // installed INFLATING, allocated the objectmonitor, and then finally
    // STed the address of the objectmonitor into the mark. This was
    // correct, but artificially lengthened the interval in which INFLATED
    // appeared in the mark, thus increasing the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its
    // local free list.
    // This reduces coherency traffic and lock contention on the global
    // free list.
    // Using such local free lists, it doesn't matter if the om_alloc() call
    // appears before or after the CAS(INFLATING) operation.
    // See the comments in om_alloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
      ObjectMonitor* m;
      if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
        // If !AsyncDeflateIdleMonitors or if an internal inflation, then
        // we won't stop for a potential safepoint in om_alloc.
        m = om_alloc(self, cause);
      } else {
        // If AsyncDeflateIdleMonitors and not an internal inflation, then
        // we may stop for a safepoint in om_alloc() so protect object.
        Handle h_obj(self, object);
        m = om_alloc(self, cause);
        object = h_obj();  // Refresh object.
      }
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        // CAS failed: someone else changed the mark word first.
        om_release(self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated
      // state? Consider what happens when a thread unlocks a stack-locked
      // object. It attempts to use CAS to swing the displaced header value
      // from the on-stack BasicLock back into the object header. Recall
      // also that the header value (hash code, etc) can reside in (a) the
      // object header, or (b) a displaced header associated with the
      // stack-lock, or (c) a displaced header in an ObjectMonitor. The
      // inflate() routine must copy the header value from the BasicLock on
      // the owner's stack to the ObjectMonitor, all the while preserving
      // the hashCode stability invariants. If the owner decides to release
      // the lock while the value is 0, the unlock will fail and control
      // will eventually pass from slow_exit() to inflate. The owner will
      // then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the BasicLock to the
      // object) while inflation is in-progress. This protocol avoids races
      // that would otherwise permit hashCode values to change or "flicker"
      // for an object.
      // Critically, while object->mark is 0 mark.displaced_mark_helper() is
      // stable. 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
      // with this thread we could simply set m->_owner = self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark.locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      omh_p->set_om_ptr(m);
      assert(m->is_new(), "freshly allocated monitor must be new");
      m->set_allocation_state(ObjectMonitor::Old);

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      object->release_set_mark(markWord::encode(m));

      // Hopefully the performance counters are allocated on distinct cache
      // lines to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
      return;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by
    // swinging a pre-locked ObjectMonitor pointer into the object header.
    // A successful CAS inflates the object *and* confers ownership to the
    // inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to
    // self. An inflateTry() method that we could call from fast_enter() and
    // slow_enter() would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m;
    if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
      // If !AsyncDeflateIdleMonitors or if an internal inflation, then
      // we won't stop for a potential safepoint in om_alloc.
      m = om_alloc(self, cause);
    } else {
      // If AsyncDeflateIdleMonitors and not an internal inflation, then
      // we may stop for a safepoint in om_alloc() so protect object.
      Handle h_obj(self, object);
      m = om_alloc(self, cause);
      object = h_obj();  // Refresh object.
    }
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    // If we leave _owner == DEFLATER_MARKER here, then the simple C2
    // ObjectMonitor enter optimization can no longer race with async
    // deflation and reuse.
    m->set_object(object);
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    omh_p->set_om_ptr(m);
    assert(m->is_new(), "freshly allocated monitor must be new");
    m->set_allocation_state(ObjectMonitor::Old);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      // CAS failed: undo the speculative preparation and return the
      // monitor to the free list before retrying.
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
      omh_p->set_om_ptr(NULL);
      // om_release() will reset the allocation state
      om_release(self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
    return;
  }
}


// We maintain a list of in-use monitors for each thread.
//
// For safepoint based deflation:
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see om_flush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in-use could negatively impact performance. We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.
//
// For async deflation:
// If a special deflation request is made, then the safepoint based
// deflation mechanism is used.
// Otherwise, an async deflation request
// is registered with the ServiceThread and it is notified.

// Safepoint-time entry point: either run the legacy (safepoint based)
// global-list deflation, or hand the work off to the ServiceThread when
// async deflation is enabled and no special deflation was requested.
void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // The per-thread in-use lists are handled in
  // ParallelSPCleanupThreadClosure::do_thread().

  if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
    // Use the older mechanism for the global in-use list or if a
    // special deflation has been requested before the safepoint.
    ObjectSynchronizer::deflate_idle_monitors(_counters);
    return;
  }

  log_debug(monitorinflation)("requesting async deflation of idle monitors.");
  // Request deflation of idle monitors by the ServiceThread:
  set_is_async_deflation_requested(true);
  MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
  ml.notify_all();
}

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
// On success the deflated monitor is appended to the caller's working
// free list (free_head_p/free_tail_p); the caller fixes up _next_om
// linkages afterwards. Must only be called at a safepoint.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** free_head_p,
                                         ObjectMonitor** free_tail_p) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markWord mark = obj->mark();
  guarantee(mark == markWord::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
            markWord::encode(mid).value());
  // Make sure that mark.monitor() and markWord::encode() agree:
  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  const markWord dmw = mid->header();
  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

  if (mid->is_busy() || mid->ref_count() != 0) {
    // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
    // is in use so no deflation.
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  mark.value(), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    if (AsyncDeflateIdleMonitors) {
      // clear() expects the owner field to be NULL and we won't race
      // with the simple C2 ObjectMonitor enter optimization since
      // we're at a safepoint.
      mid->set_owner(NULL);
    }
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));
    assert(mid->is_free(), "invariant");

    // Move the deflated ObjectMonitor to the working free list
    // defined by free_head_p and free_tail_p.
    if (*free_head_p == NULL) *free_head_p = mid;
    if (*free_tail_p != NULL) {
      // We append to the list so the caller can use mid->_next_om
      // to fix the linkages in its context.
      ObjectMonitor* prevtail = *free_tail_p;
      // Should have been cleaned up by the caller:
      assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
             INTPTR_FORMAT, p2i(prevtail->_next_om));
      prevtail->_next_om = mid;
    }
    *free_tail_p = mid;
    // At this point, mid->_next_om still refers to its current
    // value and another ObjectMonitor's _next_om field still
    // refers to this ObjectMonitor. Those linkages have to be
    // cleaned up by the caller who has the complete context.
    deflated = true;
  }
  return deflated;
}

// Deflate the specified ObjectMonitor if not in-use using a JavaThread.
// Returns true if it was deflated and false otherwise.
//
// The async deflation protocol sets owner to DEFLATER_MARKER and
// makes ref_count negative as signals to contending threads that
// an async deflation is in progress. There are a number of checks
// as part of the protocol to make sure that the calling thread has
// not lost the race to a contending thread or to a thread that just
// wants to use the ObjectMonitor*.
//
// The ObjectMonitor has been successfully async deflated when:
// (owner == DEFLATER_MARKER && ref_count < 0)
// Contending threads or ObjectMonitor* using threads that see those
// values know to retry their operation.
//
bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                  ObjectMonitor** free_head_p,
                                                  ObjectMonitor** free_tail_p) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  // A newly allocated ObjectMonitor should not be seen here so we
  // avoid an endless inflate/deflate cycle.
  assert(mid->is_old(), "must be old: allocation_state=%d",
         (int) mid->allocation_state());

  if (mid->is_busy() || mid->ref_count() != 0) {
    // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
    // is in use so no deflation.
    return false;
  }

  if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) {
    // ObjectMonitor is not owned by another thread. Our setting
    // owner to DEFLATER_MARKER forces any contending thread through
    // the slow path. This is just the first part of the async
    // deflation dance.

    if (mid->_contentions != 0 || mid->_waiters != 0) {
      // Another thread has raced to enter the ObjectMonitor after
      // mid->is_busy() above or has already entered and waited on
      // it which makes it busy so no deflation. Restore owner to
      // NULL if it is still DEFLATER_MARKER.
      Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
      return false;
    }

    if (Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) {
      // Make ref_count negative to force any contending threads or
      // ObjectMonitor* using threads to retry. This is the second
      // part of the async deflation dance.

      if (mid->owner_is_DEFLATER_MARKER()) {
        // If owner is still DEFLATER_MARKER, then we have successfully
        // signaled any contending threads to retry. If it is not, then we
        // have lost the race to an entering thread and the ObjectMonitor
        // is now busy. This is the third and final part of the async
        // deflation dance.
        // Note: This owner check solves the ABA problem with ref_count
        // where another thread acquired the ObjectMonitor, finished
        // using it and restored the ref_count to zero.

        // Sanity checks for the races:
        guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
                  mid->_contentions);
        guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
        guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
                  INTPTR_FORMAT, p2i(mid->_cxq));
        guarantee(mid->_EntryList == NULL,
                  "must be no entering threads: EntryList=" INTPTR_FORMAT,
                  p2i(mid->_EntryList));

        const oop obj = (oop) mid->object();
        if (log_is_enabled(Trace, monitorinflation)) {
          ResourceMark rm;
          log_trace(monitorinflation)("deflate_monitor_using_JT: "
                                      "object=" INTPTR_FORMAT ", mark="
                                      INTPTR_FORMAT ", type='%s'",
                                      p2i(obj), obj->mark().value(),
                                      obj->klass()->external_name());
        }

        // Install the old mark word if nobody else has already done it.
        mid->install_displaced_markword_in_object(obj);
        mid->clear_using_JT();

        assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
               p2i(mid->object()));
        assert(mid->is_free(), "must be free: allocation_state=%d",
               (int) mid->allocation_state());

        // Move the deflated ObjectMonitor to the working free list
        // defined by free_head_p and free_tail_p.
        if (*free_head_p == NULL) {
          // First one on the list.
          *free_head_p = mid;
        }
        if (*free_tail_p != NULL) {
          // We append to the list so the caller can use mid->_next_om
          // to fix the linkages in its context.
          ObjectMonitor* prevtail = *free_tail_p;
          // Should have been cleaned up by the caller:
          assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
                 INTPTR_FORMAT, p2i(prevtail->_next_om));
          prevtail->_next_om = mid;
        }
        *free_tail_p = mid;

        // At this point, mid->_next_om still refers to its current
        // value and another ObjectMonitor's _next_om field still
        // refers to this ObjectMonitor. Those linkages have to be
        // cleaned up by the caller who has the complete context.

        // We leave owner == DEFLATER_MARKER and ref_count < 0
        // to force any racing threads to retry.
        return true;  // Success, ObjectMonitor has been deflated.
      }

      // The owner was changed from DEFLATER_MARKER so we lost the
      // race since the ObjectMonitor is now busy.

      // Add back max_jint to restore the ref_count field to its
      // proper value (which may not be what we saw above):
      Atomic::add(max_jint, &mid->_ref_count);

      assert(mid->ref_count() >= 0, "must not be negative: ref_count=%d",
             mid->ref_count());
      return false;
    }

    // The ref_count was no longer 0 so we lost the race since the
    // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
    // Restore owner to NULL if it is still DEFLATER_MARKER:
    Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
  }

  // The owner field is no longer NULL so we lost the race since the
  // ObjectMonitor is now busy.
  return false;
}

// Walk a given monitor list, and deflate idle monitors
// The given list could be a per-thread list or a global list
// Caller acquires gListLock as needed.
//
// In the case of parallel processing of thread local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise when worker threads would
// process the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
                                             int* count_p,
                                             ObjectMonitor** free_head_p,
                                             ObjectMonitor** free_tail_p) {
  ObjectMonitor* cur_mid_in_use = NULL;  // trailing node kept on the in-use list
  ObjectMonitor* mid;
  ObjectMonitor* next;
  int deflated_count = 0;

  for (mid = *list_p; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (mid == *list_p) {
        *list_p = mid->_next_om;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->_next_om = mid->_next_om;  // maintain the current thread in-use list
      }
      next = mid->_next_om;
      mid->_next_om = NULL;  // This mid is current tail in the free_head_p list
      mid = next;
      deflated_count++;
      *count_p = *count_p - 1;
    } else {
      cur_mid_in_use = mid;
      mid = mid->_next_om;
    }
  }
  return deflated_count;
}

// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
// a JavaThread. Returns the number of deflated ObjectMonitors. The given
// list could be a per-thread in-use list or the global in-use list.
// Caller acquires gListLock as appropriate. If a safepoint has started,
// then we save state via saved_mid_in_use_p and return to the caller to
// honor the safepoint.
//
int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
                                                      int* count_p,
                                                      ObjectMonitor** free_head_p,
                                                      ObjectMonitor** free_tail_p,
                                                      ObjectMonitor** saved_mid_in_use_p) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");

  ObjectMonitor* cur_mid_in_use = NULL;  // trailing node kept on the in-use list
  ObjectMonitor* mid;
  ObjectMonitor* next;
  int deflated_count = 0;

  if (*saved_mid_in_use_p == NULL) {
    // No saved state so start at the beginning.
    mid = *list_p;
  } else {
    // We're restarting after a safepoint so restore the necessary state
    // before we resume.
    cur_mid_in_use = *saved_mid_in_use_p;
    mid = cur_mid_in_use->_next_om;
  }
  while (mid != NULL) {
    // Only try to deflate if there is an associated Java object and if
    // mid is old (is not newly allocated and is not newly freed).
    if (mid->object() != NULL && mid->is_old() &&
        deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (mid == *list_p) {
        *list_p = mid->_next_om;
      } else if (cur_mid_in_use != NULL) {
        // Maintain the current in-use list.
        cur_mid_in_use->_next_om = mid->_next_om;
      }
      next = mid->_next_om;
      mid->_next_om = NULL;
      // At this point mid is disconnected from the in-use list
      // and is the current tail in the free_head_p list.
      mid = next;
      deflated_count++;
      *count_p = *count_p - 1;
    } else {
      // mid is considered in-use if it does not have an associated
      // Java object or mid is not old or deflation did not succeed.
      // A mid->is_new() node can be seen here when it is freshly
      // returned by om_alloc() (and skips the deflation code path).
      // A mid->is_old() node can be seen here when deflation failed.
      // A mid->is_free() node can be seen here when a fresh node from
      // om_alloc() is released by om_release() due to losing the race
      // in inflate().

      cur_mid_in_use = mid;
      mid = mid->_next_om;

      if (SafepointSynchronize::is_synchronizing() &&
          cur_mid_in_use != *list_p && cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller so gListLock can be dropped as appropriate
        // before blocking.
        *saved_mid_in_use_p = cur_mid_in_use;
        return deflated_count;
      }
    }
  }
  // We finished the list without a safepoint starting so there's
  // no need to save state.
  *saved_mid_in_use_p = NULL;
  return deflated_count;
}

// Reset the per-safepoint deflation counters before a deflation pass.
void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->n_in_use = 0;              // currently associated with objects
  counters->n_in_circulation = 0;      // extant
  counters->n_scavenged = 0;           // reclaimed (global and per-thread)
  counters->per_thread_scavenged = 0;  // per-thread scavenge total
  counters->per_thread_times = 0.0;    // per-thread scavenge times
}

// Safepoint based deflation of the global in-use list. With
// AsyncDeflateIdleMonitors this is a no-op unless a special deflation
// has been requested.
void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    // Nothing to do when global idle ObjectMonitors are deflated using
    // a JavaThread unless a special deflation has been requested.
    if (!is_special_deflation_requested()) {
      return;
    }
  }

  // NOTE(review): 'deflated' is never read in this function -- candidate
  // for removal.
  bool deflated = false;

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Prevent om_flush from changing mids in Thread dtor's during deflation
  // And in case the vm thread is acquiring a lock during a safepoint
  // See e.g. 6320749
  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");

  // Note: the thread-local monitors lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan g_om_in_use_list
  int deflated_count = 0;
  if (g_om_in_use_list != NULL) {
    // Update n_in_circulation before g_om_in_use_count is updated by deflation.
    counters->n_in_circulation += g_om_in_use_count;
    deflated_count = deflate_monitor_list((ObjectMonitor**)&g_om_in_use_list, (int*)&g_om_in_use_count, &free_head_p, &free_tail_p);
    counters->n_in_use += g_om_in_use_count;
  }

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
    assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
           INTPTR_FORMAT, p2i(free_tail_p->_next_om));
    // constant-time list splice - prepend scavenged segment to g_free_list
    free_tail_p->_next_om = g_free_list;
    g_free_list = free_head_p;
    counters->n_scavenged += deflated_count;
  }
  Thread::muxRelease(&gListLock);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

// Deflate global idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread* self = JavaThread::current();

  deflate_common_idle_monitors_using_JT(true /* is_global */, self);
}

// Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");

  // The request for this thread has been (or is being) serviced.
  target->om_request_deflation = false;

  deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
}

// Deflate global or per-thread idle ObjectMonitors using a JavaThread.
// Loops, pausing at safepoint boundaries (via saved_mid_in_use_p), until
// the chosen in-use list has been completely walked.
void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
  JavaThread* self = JavaThread::current();

  int deflated_count = 0;
  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor* free_tail_p = NULL;
  ObjectMonitor* saved_mid_in_use_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  if (is_global) {
    Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
    OM_PERFDATA_OP(MonExtant, set_value(g_om_in_use_count));
  } else {
    OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
  }

  do {
    int local_deflated_count;
    if (is_global) {
      local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor**)&g_om_in_use_list, (int*)&g_om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
    } else {
      local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
    }
    deflated_count += local_deflated_count;

    if (free_head_p != NULL) {
      // Move the deflated ObjectMonitors to the global free list.
      guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
      assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
             INTPTR_FORMAT, p2i(free_tail_p->_next_om));

      if (!is_global) {
        // In the global case we already hold gListLock.
        Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)");
      }
      // Constant-time list splice - prepend scavenged segment to g_free_list.
      free_tail_p->_next_om = g_free_list;
      g_free_list = free_head_p;

      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
      if (!is_global) {
        Thread::muxRelease(&gListLock);
      }
    }

    if (saved_mid_in_use_p != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      if (is_global) {
        Thread::muxRelease(&gListLock);
      }
      timer.stop();
      {
        if (is_global) {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        } else {
          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
        }
        assert(SafepointSynchronize::is_synchronizing(), "sanity check");
        // Honor the pending safepoint before resuming the walk.
        ThreadBlockInVM blocker(self);
      }
      // Prepare for another loop after the safepoint.
      free_head_p = NULL;
      free_tail_p = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
      if (is_global) {
        Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(3)");
      }
    }
  } while (saved_mid_in_use_p != NULL);
  if (is_global) {
    Thread::muxRelease(&gListLock);
  }
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    if (is_global) {
      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
    } else {
      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
    }
  }
}

void
ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
  // monitors at a safepoint when a special deflation has been requested.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);

  bool needs_special_deflation = is_special_deflation_requested();
  if (!AsyncDeflateIdleMonitors || needs_special_deflation) {
    // AsyncDeflateIdleMonitors does not use these counters unless
    // there is a special deflation request.

    OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
    OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("g_om_population=%d, g_om_in_use_count=%d, "
                               "g_om_free_count=%d", g_om_population,
                               g_om_in_use_count, g_om_free_count);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0;    // Reset
  GVars.stw_random = os::random();
  GVars.stw_cycle++;
  if (needs_special_deflation) {
    set_is_special_deflation_requested(false);  // special deflation is done
  }
}

// Safepoint based deflation of a single thread's in-use list. With
// AsyncDeflateIdleMonitors (and no special deflation request) this only
// marks the thread for later async deflation by the ServiceThread.
void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    if (!is_special_deflation_requested()) {
      // Mark the JavaThread for idle monitor deflation if a special
      // deflation has NOT been requested.
      if (thread->om_in_use_count > 0) {
        // This JavaThread is using monitors so mark it.
        thread->om_request_deflation = true;
      }
      return;
    }
  }

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Update n_in_circulation before om_in_use_count is updated by deflation.
  counters->n_in_circulation += thread->om_in_use_count;

  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
  counters->n_in_use += thread->om_in_use_count;

  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
2400 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); 2401 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om=" 2402 INTPTR_FORMAT, p2i(free_tail_p->_next_om)); 2403 2404 // constant-time list splice - prepend scavenged segment to g_free_list 2405 free_tail_p->_next_om = g_free_list; 2406 g_free_list = free_head_p; 2407 counters->n_scavenged += deflated_count; 2408 counters->per_thread_scavenged += deflated_count; 2409 } 2410 2411 timer.stop(); 2412 // Safepoint logging cares about cumulative per_thread_times and 2413 // we'll capture most of the cost, but not the muxRelease() which 2414 // should be cheap. 2415 counters->per_thread_times += timer.seconds(); 2416 2417 Thread::muxRelease(&gListLock); 2418 2419 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2420 LogStreamHandle(Info, monitorinflation) lsh_info; 2421 LogStream* ls = NULL; 2422 if (log_is_enabled(Debug, monitorinflation)) { 2423 ls = &lsh_debug; 2424 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2425 ls = &lsh_info; 2426 } 2427 if (ls != NULL) { 2428 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count); 2429 } 2430 } 2431 2432 // Monitor cleanup on JavaThread::exit 2433 2434 // Iterate through monitor cache and attempt to release thread's monitors 2435 // Gives up on a particular monitor if an exception occurs, but continues 2436 // the overall iteration, swallowing the exception. 2437 class ReleaseJavaMonitorsClosure: public MonitorClosure { 2438 private: 2439 TRAPS; 2440 2441 public: 2442 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} 2443 void do_monitor(ObjectMonitor* mid) { 2444 if (mid->owner() == THREAD) { 2445 (void)mid->complete_exit(CHECK); 2446 } 2447 } 2448 }; 2449 2450 // Release all inflated monitors owned by THREAD. Lightweight monitors are 2451 // ignored. 
This is meant to be called during JNI thread detach which assumes 2452 // all remaining monitors are heavyweight. All exceptions are swallowed. 2453 // Scanning the extant monitor list can be time consuming. 2454 // A simple optimization is to add a per-thread flag that indicates a thread 2455 // called jni_monitorenter() during its lifetime. 2456 // 2457 // Instead of No_Savepoint_Verifier it might be cheaper to 2458 // use an idiom of the form: 2459 // auto int tmp = SafepointSynchronize::_safepoint_counter ; 2460 // <code that must not run at safepoint> 2461 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; 2462 // Since the tests are extremely cheap we could leave them enabled 2463 // for normal product builds. 2464 2465 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { 2466 assert(THREAD == JavaThread::current(), "must be current Java thread"); 2467 NoSafepointVerifier nsv; 2468 ReleaseJavaMonitorsClosure rjmc(THREAD); 2469 Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread"); 2470 ObjectSynchronizer::monitors_iterate(&rjmc); 2471 Thread::muxRelease(&gListLock); 2472 THREAD->clear_pending_exception(); 2473 } 2474 2475 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) { 2476 switch (cause) { 2477 case inflate_cause_vm_internal: return "VM Internal"; 2478 case inflate_cause_monitor_enter: return "Monitor Enter"; 2479 case inflate_cause_wait: return "Monitor Wait"; 2480 case inflate_cause_notify: return "Monitor Notify"; 2481 case inflate_cause_hash_code: return "Monitor Hash Code"; 2482 case inflate_cause_jni_enter: return "JNI Monitor Enter"; 2483 case inflate_cause_jni_exit: return "JNI Monitor Exit"; 2484 default: 2485 ShouldNotReachHere(); 2486 } 2487 return "Unknown"; 2488 } 2489 2490 //------------------------------------------------------------------------------ 2491 // Debugging code 2492 2493 u_char* ObjectSynchronizer::get_gvars_addr() { 2494 return (u_char*)&GVars; 2495 } 2496 2497 
// Raw address of GVars.hc_sequence.
u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

// Size in bytes of the SharedGlobals structure.
size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

// Raw address of GVars.stw_random.
u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// Audit the global and per-thread ObjectMonitor lists: verify each entry
// and compare list lengths against the published counts. Any error is
// logged, counted, and makes the trailing guarantee() fail.
// on_exit == true means we are at VM exit, so gListLock is not taken.
// NOTE(review): requires at least Info level 'monitorinflation' logging
// to be enabled by the caller; otherwise 'ls' stays NULL, the assert
// fires in debug builds, and 'ls->print_cr' would crash in product
// builds -- confirm all call sites guard on log_is_enabled.
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  // Pick the most verbose enabled logging level for the output.
  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  }

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check g_om_population:
  if (g_om_population == chk_om_population) {
    ls->print_cr("g_om_population=%d equals chk_om_population=%d",
                 g_om_population, chk_om_population);
  } else {
    ls->print_cr("ERROR: g_om_population=%d is not equal to "
                 "chk_om_population=%d", g_om_population,
                 chk_om_population);
    error_cnt++;
  }

  // Check g_om_in_use_list and g_om_in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check g_free_list and g_om_free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls, on_exit);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
// A free monitor must be idle: not busy, header cleared (except that with
// AsyncDeflateIdleMonitors a free *global* monitor may still carry a
// header), and no associated object.
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    } else if (!AsyncDeflateIdleMonitors) {
      // A non-zero header on a free global monitor is only an error when
      // async deflation is off.
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global free list and count; log the results of the checks.
2631 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out, 2632 int *error_cnt_p) { 2633 int chk_om_free_count = 0; 2634 for (ObjectMonitor* n = g_free_list; n != NULL; n = n->_next_om) { 2635 chk_free_entry(NULL /* jt */, n, out, error_cnt_p); 2636 chk_om_free_count++; 2637 } 2638 if (g_om_free_count == chk_om_free_count) { 2639 out->print_cr("g_om_free_count=%d equals chk_om_free_count=%d", 2640 g_om_free_count, chk_om_free_count); 2641 } else { 2642 out->print_cr("ERROR: g_om_free_count=%d is not equal to " 2643 "chk_om_free_count=%d", g_om_free_count, 2644 chk_om_free_count); 2645 *error_cnt_p = *error_cnt_p + 1; 2646 } 2647 } 2648 2649 // Check the global in-use list and count; log the results of the checks. 2650 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out, 2651 int *error_cnt_p) { 2652 int chk_om_in_use_count = 0; 2653 for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) { 2654 chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p); 2655 chk_om_in_use_count++; 2656 } 2657 if (g_om_in_use_count == chk_om_in_use_count) { 2658 out->print_cr("g_om_in_use_count=%d equals chk_om_in_use_count=%d", g_om_in_use_count, 2659 chk_om_in_use_count); 2660 } else { 2661 out->print_cr("ERROR: g_om_in_use_count=%d is not equal to chk_om_in_use_count=%d", 2662 g_om_in_use_count, chk_om_in_use_count); 2663 *error_cnt_p = *error_cnt_p + 1; 2664 } 2665 } 2666 2667 // Check an in-use monitor entry; log any errors. 
2668 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n, 2669 outputStream * out, int *error_cnt_p) { 2670 if (n->header().value() == 0) { 2671 if (jt != NULL) { 2672 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 2673 ": in-use per-thread monitor must have non-NULL _header " 2674 "field.", p2i(jt), p2i(n)); 2675 } else { 2676 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor " 2677 "must have non-NULL _header field.", p2i(n)); 2678 } 2679 *error_cnt_p = *error_cnt_p + 1; 2680 } 2681 if (n->object() == NULL) { 2682 if (jt != NULL) { 2683 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 2684 ": in-use per-thread monitor must have non-NULL _object " 2685 "field.", p2i(jt), p2i(n)); 2686 } else { 2687 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor " 2688 "must have non-NULL _object field.", p2i(n)); 2689 } 2690 *error_cnt_p = *error_cnt_p + 1; 2691 } 2692 const oop obj = (oop)n->object(); 2693 const markWord mark = obj->mark(); 2694 if (!mark.has_monitor()) { 2695 if (jt != NULL) { 2696 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 2697 ": in-use per-thread monitor's object does not think " 2698 "it has a monitor: obj=" INTPTR_FORMAT ", mark=" 2699 INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value()); 2700 } else { 2701 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " 2702 "monitor's object does not think it has a monitor: obj=" 2703 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), 2704 p2i(obj), mark.value()); 2705 } 2706 *error_cnt_p = *error_cnt_p + 1; 2707 } 2708 ObjectMonitor* const obj_mon = mark.monitor(); 2709 if (n != obj_mon) { 2710 if (jt != NULL) { 2711 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 2712 ": in-use per-thread monitor's object does not refer " 2713 "to the same monitor: obj=" INTPTR_FORMAT ", mark=" 2714 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt), 2715 p2i(n), 
p2i(obj), mark.value(), p2i(obj_mon)); 2716 } else { 2717 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " 2718 "monitor's object does not refer to the same monitor: obj=" 2719 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon=" 2720 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon)); 2721 } 2722 *error_cnt_p = *error_cnt_p + 1; 2723 } 2724 } 2725 2726 // Check the thread's free list and count; log the results of the checks. 2727 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt, 2728 outputStream * out, 2729 int *error_cnt_p) { 2730 int chk_om_free_count = 0; 2731 for (ObjectMonitor* n = jt->om_free_list; n != NULL; n = n->_next_om) { 2732 chk_free_entry(jt, n, out, error_cnt_p); 2733 chk_om_free_count++; 2734 } 2735 if (jt->om_free_count == chk_om_free_count) { 2736 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals " 2737 "chk_om_free_count=%d", p2i(jt), jt->om_free_count, chk_om_free_count); 2738 } else { 2739 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not " 2740 "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count, 2741 chk_om_free_count); 2742 *error_cnt_p = *error_cnt_p + 1; 2743 } 2744 } 2745 2746 // Check the thread's in-use list and count; log the results of the checks. 
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  // Walk jt's in-use list, verifying each entry and counting so the list
  // length can be compared with the published om_in_use_count.
  int chk_om_in_use_count = 0;
  for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
    chk_in_use_entry(jt, n, out, error_cnt_p);
    chk_om_in_use_count++;
  }
  if (jt->om_in_use_count == chk_om_in_use_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                  "chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                  "equal to chk_om_in_use_count=%d", p2i(jt), jt->om_in_use_count,
                  chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
// on_exit == true means we are at VM exit, so gListLock is not taken for
// the global section.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
                                                    bool on_exit) {
  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  }

  stringStream ss;
  if (g_om_in_use_count > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %7s %18s %18s",
                  "monitor", "BHL", "ref_cnt", "object", "object type");
    out->print_cr("================== === ======= ================== ==================");
    for (ObjectMonitor* n = g_om_in_use_list; n != NULL; n = n->_next_om) {
      const oop obj = (oop) n->object();
      const markWord mark = n->header();
      ResourceMark rm;
      // NOTE(review): assumes every in-use monitor has a non-NULL
      // _object -- obj->klass() below would crash on a NULL oop. Confirm
      // this holds while async deflation is in progress.
      out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
                 p2i(n), n->is_busy() != 0, mark.hash() != 0,
                 n->owner() != NULL, (int)n->ref_count(), p2i(obj),
                 obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  // The per-thread lists are walked without gListLock; they are only
  // reachable via the JavaThreadIteratorWithHandle below.
  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %7s %18s %18s",
                "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
  out->print_cr("================== ================== === ======= ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    for (ObjectMonitor* n = jt->om_in_use_list; n != NULL; n = n->_next_om) {
      const oop obj = (oop) n->object();
      const markWord mark = n->header();
      ResourceMark rm;
      out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
                 INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0,
                 mark.hash() != 0, n->owner() != NULL, (int)n->ref_count(),
                 p2i(obj), obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int pop_count = 0;
  out->print_cr("%18s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Total");
  out->print_cr("================== ========== ========== ==========");
  out->print_cr("%18s %10d %10d %10d", "",
                g_om_in_use_count, g_om_free_count, g_om_population);
  pop_count += g_om_in_use_count + g_om_free_count;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
    pop_count += jt->om_in_use_count + jt->om_free_count;
  }
  return pop_count;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedObjectMonitor* block = OrderAccess::load_acquire(&g_block_list);
  while (block != NULL) {
    // Slot 0 of every block is the chain marker, so a real monitor inside
    // this block lies strictly between &block[0] and &block[_BLOCKSIZE].
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      // The pointer must land exactly on a slot boundary within the block.
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedObjectMonitor*)block->_next_om;
  }
  return 0;
}

#endif