/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround for dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;
bool ObjectSynchronizer::_gOmShouldDeflateIdleMonitors = false;
bool volatile ObjectSynchronizer::_is_cleanup_requested = false;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden.  Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
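
// Editor's note: a minimal sketch (not part of the original sources, and
// excluded from compilation) of how a caller is expected to use the quick_*
// contract described above: try the optimistic fast path while still
// _thread_in_Java, and fall back to a full state transition plus slow-path
// call only when it returns false. The helper name is hypothetical.
#if 0
static void example_notify_entry(oopDesc* obj, Thread* self, bool all) {
  if (ObjectSynchronizer::quick_notify(obj, self, all)) {
    return;  // satisfied on the fast path; no safepoint, no blocking
  }
  // Slow path: perform a proper thread-state transition (e.g. via a
  // JRT_ENTRY-style wrapper), materialize a Handle for obj, and then call
  // ObjectSynchronizer::notify()/notifyall(), which may block.
}
#endif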

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  while (true) {
    const markOop mark = obj->mark();

    if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * const m = omh.om_ptr();
      assert(oopDesc::equals((oop) m->object(), obj), "invariant");
      Thread * const owner = (Thread *) m->_owner;

      // Lock contention and Transactional Lock Elision (TLE) diagnostics
      // and observability
      // Case: light contention possibly amenable to TLE
      // Case: TLE inimical operations such as nested/recursive synchronization

      if (owner == Self) {
        m->_recursions++;
        return true;
      }

      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-NULL so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // Biased Locking in the object's header, the second check is for
      // stack-locking in the object's header, the third check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markOopDesc::unused_mark());

      if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
        assert(m->_recursions == 0, "invariant");
        assert(m->_owner == Self, "invariant");
        return true;
      }
    }
    break;
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compilers use
// assembly copies of this code. Make sure to update that code if this
// function is changed. The implementation is extremely sensitive to
// race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, object, inflate_cause_vm_internal);
  omh.om_ptr()->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  bool do_loop = true;
  while (do_loop) {
    markOop mark = obj->mark();
    assert(!mark->has_bias_pattern(), "should not see bias pattern here");

    if (mark->is_neutral()) {
      // Anticipate successful CAS -- the ST of the displaced mark must
      // be visible <= the ST performed by the CAS.
      lock->set_displaced_header(mark);
      if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
        return;
      }
      // Fall through to inflate() ...
    } else if (mark->has_locker() &&
               THREAD->is_lock_owned((address)mark->locker())) {
      assert(lock != mark->locker(), "must not re-lock the same lock");
      assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
      lock->set_displaced_header(NULL);
      return;
    }

    // The object header will never be displaced to this lock,
    // so it does not matter what the value is, except that it
    // must be non-zero to avoid looking like a re-entrant lock,
    // and must not look locked either.
    lock->set_displaced_header(markOopDesc::unused_mark());
    ObjectMonitorHandle omh;
    inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
    do_loop = !omh.om_ptr()->enter(THREAD);
  }
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavyweight
// monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// (A sketch of this sequence follows below.)
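
// Editor's note: the following sketch (not part of the original sources, and
// excluded from compilation) restates steps 1-4 above in code, assuming the
// caller already owns both lock1 and lock2 when it decides to wait. wait()
// itself releases and reacquires lock2's monitor around the wait, so steps
// 2, 3 and 5 happen inside that single call. The helper name is hypothetical.
#if 0
static void example_nested_wait(Handle lock1, Handle lock2, TRAPS) {
  // 1) Fully release lock1, remembering its recursion count.
  intptr_t recursions = ObjectSynchronizer::complete_exit(lock1, THREAD);
  // 2)/3)/5) Wait on lock2; its monitor is exited and re-entered internally.
  ObjectSynchronizer::wait(lock2, 0 /* no timeout */, THREAD);
  // 4) Re-enter lock1 with the original recursion count restored.
  ObjectSynchronizer::reenter(lock1, recursions, THREAD);
}
#endif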
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  bool do_loop = true;
  while (do_loop) {
    ObjectMonitorHandle omh;
    inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
    do_loop = !omh.om_ptr()->reenter(recursion, THREAD);
  }
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  bool do_loop = true;
  while (do_loop) {
    ObjectMonitorHandle omh;
    inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
    do_loop = !omh.om_ptr()->enter(THREAD);
  }
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
  ObjectMonitor * monitor = omh.om_ptr();
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  ObjectMonitor * monitor = omh.om_ptr();

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  omh.om_ptr()->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notify(THREAD);
}

// NOTE: see the comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}
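
// Editor's note: a minimal sketch (not part of the original sources, and
// excluded from compilation) of the inflation-lock striping used above.
// The object's address is reduced to an index into the gInflationLocks
// table: shifting right by 5 discards low-order bits shared by neighboring
// objects, and because NINFLATIONLOCKS is a power of two the modulus
// reduces to a mask. Many objects deliberately share each stripe.
#if 0
static volatile intptr_t* example_inflation_lock_for(oop obj) {
  int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS - 1);
  return gInflationLocks + ix;   // the stripe this object's waiters park on
}
#endif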

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  return value;
}
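
// Editor's note: a minimal, self-contained sketch (not part of the original
// sources, and excluded from compilation) of the Marsaglia xor-shift scheme
// in the default branch above, with the four per-thread state words
// (_hashStateX.._hashStateW) modeled as statics. The seed values below are
// hypothetical stand-ins; the real per-thread state is seeded when the
// Thread is constructed. For non-degenerate seeds the generator has a very
// long period and good bit diffusion, which is what makes it attractive as
// a cheap hashCode source.
#if 0
static intptr_t example_xor_shift_hash() {
  static unsigned sx = 123456789, sy = 362436069, sz = 521288629, sw = 88675123;
  unsigned t = sx;
  t ^= (t << 11);              // mix the oldest state word
  sx = sy; sy = sz; sz = sw;   // shift the state pipeline down one slot
  sw = (sw ^ (sw >> 19)) ^ (t ^ (t >> 8));
  intptr_t value = sw & markOopDesc::hash_mask;  // clamp to the hash field
  if (value == 0) value = 0xBAD;                 // 0 is reserved for "no hash"
  return value;
}
#endif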

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  while (true) {
    ObjectMonitor* monitor = NULL;
    markOop temp, test;
    intptr_t hash;
    markOop mark = ReadStableMark(obj);

    // object should remain ineligible for biased locking
    assert(!mark->has_bias_pattern(), "invariant");

    if (mark->is_neutral()) {
      hash = mark->hash();              // this is a normal header
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(Self, obj);  // allocate a new hash code
      temp = mark->copy_set_hash(hash); // merge the hash code into header
      // use (machine word version) atomic operation to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {
        return hash;
      }
      // If the atomic operation failed, we must inflate the header
      // into a heavyweight monitor. We could add more code here
      // for the fast path, but it is not worth the complexity.
    } else if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      monitor = omh.om_ptr();
      temp = monitor->header();
      assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
      hash = temp->hash();
      if (hash != 0) {
        return hash;
      }
      // Skip to the following code to reduce code size
    } else if (Self->is_lock_owned((address)mark->locker())) {
      temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
      assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
      hash = temp->hash();                  // by current thread, check if the displaced
      if (hash != 0) {                      // header contains hash code
        return hash;
      }
      // WARNING:
      // The displaced header in the BasicLock on a thread's stack
      // is strictly immutable. It CANNOT be changed in ANY cases.
      // So we have to inflate the stack lock into an ObjectMonitor
      // even if the current thread owns the lock. The BasicLock on
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash code
    ObjectMonitorHandle omh;
    inflate(&omh, Self, obj, inflate_cause_hash_code);
    monitor = omh.om_ptr();
    // Load the displaced header and check it for a hash code
    mark = monitor->header();
    assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
    hash = mark->hash();
    if (hash == 0) {
      hash = get_next_hash(Self, obj);
      temp = mark->copy_set_hash(hash); // merge hash code into header
      assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(temp));
      test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
      if (test != mark) {
        // The only non-deflation update to the ObjectMonitor's
        // header/dmw field is to merge in the hash code. If someone
        // adds a new usage of the header/dmw field, please update
        // this code.
        // ObjectMonitor::install_displaced_markword_in_object()
        // does mark the header/dmw field as part of async deflation,
        // but that protocol cannot happen now due to the
        // ObjectMonitorHandle above.
        hash = test->hash();
        assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(test));
        assert(hash != 0, "Trivial unexpected object/monitor header usage.");
      }
    }
    // We finally get the hash
    return hash;
  }
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markOop mark = ReadStableMark(obj);

    // Uncontended case, header points to stack
    if (mark->has_locker()) {
      return thread->is_lock_owned((address)mark->locker());
    }
    // Contended case, header points to ObjectMonitor (tagged pointer)
    if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
      return ret_code;
    }
    // Unlocked case, header in place
    assert(mark->is_neutral(), "sanity check");
    return false;
  }
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markOop mark = ReadStableMark(obj);

    // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
    if (mark->has_locker()) {
      return self->is_lock_owned((address)mark->locker()) ?
        owner_self : owner_other;
    }

    // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
    // The Object:ObjectMonitor relationship is stable as long as we're
    // not at a safepoint and AsyncDeflateIdleMonitors is false.
    if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * monitor = omh.om_ptr();
      void * owner = monitor->_owner;
      if (owner == NULL) return owner_none;
      return (owner == self ||
              self->is_lock_owned((address)owner)) ? owner_self : owner_other;
    }

    // CASE: neutral
    assert(mark->is_neutral(), "sanity check");
    return owner_none;           // it's unlocked
  }
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();

  while (true) {
    address owner = NULL;
    markOop mark = ReadStableMark(obj);

    // Uncontended case, header points to stack
    if (mark->has_locker()) {
      owner = (address) mark->locker();
    }

    // Contended case, header points to ObjectMonitor (tagged pointer)
    else if (mark->has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      assert(monitor != NULL, "monitor should be non-null");
      owner = (address) monitor->owner();
    }

    if (owner != NULL) {
      // owning_thread_from_monitor_owner() may also return NULL here
      return Threads::owning_thread_from_monitor_owner(t_list, owner);
    }

    // Unlocked case, header in place
    // Cannot have assertion since this object may have been
    // locked by another thread when reaching here.
    // assert(mark->is_neutral(), "sanity check");

    return NULL;
  }
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      if (mid->is_active()) {
        ObjectMonitorHandle omh(mid);

        if (mid->object() == NULL ||
            (AsyncDeflateIdleMonitors && mid->_owner == DEFLATER_MARKER)) {
          // Only process with closure if the object is set.
          // For async deflation, race here if monitor is not owned!
          // The above ref_count bump (in the ObjectMonitorHandle ctor)
          // will cause subsequent async deflation to skip it.
          // However, previous or concurrent async deflation is a race.
          continue;
        }
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}
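
// Editor's note: a worked example (not part of the original sources, and
// excluded from compilation) of the threshold test above, with hypothetical
// numbers: gMonitorPopulation = 2048 and gMonitorFreeCount = 512 give
// monitors_used = 1536 and monitor_usage = (1536 * 100) / 2048 = 75, so a
// MonitorUsedDeflationThreshold of 50 would request a cleanup while one of
// 90 would not. The 100LL widens the multiply to 64 bits before the divide,
// avoiding int overflow for large populations.
#if 0
static bool example_used_above_threshold(int population, int free_count,
                                         intptr_t threshold_percent) {
  if (population == 0) return false;               // avoid divide-by-zero
  int used = population - free_count;
  int usage = (int)((used * 100LL) / population);  // percentage, 64-bit math
  return usage > threshold_percent;
}
#endif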

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object. The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record. The VMThread will delete the op
    // when completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self,
                                           const InflateCause cause) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;

  if (AsyncDeflateIdleMonitors) {
    JavaThread * jt = (JavaThread *)Self;
    if (jt->omShouldDeflateIdleMonitors && jt->omInUseCount > 0 &&
        cause != inflate_cause_vm_internal) {
      // Deflate any per-thread idle monitors for this JavaThread if
      // this is not an internal inflation. Clean up your own mess.
      // (Gibbs Rule 45) Otherwise, skip this cleanup.
      // deflate_global_idle_monitors_using_JT() is called by the ServiceThread.
      debug_only(jt->check_for_valid_safepoint_state(false);)
      ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT();
    }
  }

  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      m->FreeNext = Self->omInUseList;
      Self->omInUseList = m;
      Self->omInUseCount++;
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc(1)");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        if (AsyncDeflateIdleMonitors) {
          // Clear any values we allowed to linger during async deflation.
          take->_header = NULL;
          take->set_owner(NULL);
          take->_contentions = 0;

          if (take->ref_count() < 0) {
            // Add back max_jint to restore the ref_count field to its
            // proper value.
            Atomic::add(max_jint, &take->_ref_count);

            assert(take->ref_count() >= 0, "must not be negative: ref_count=%d",
                   take->ref_count());
          }
        }
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        assert(take->is_free(), "invariant");
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
      assert(temp[i].is_free(), "invariant");
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc(2)");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}
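
// Editor's note: a minimal sketch (not part of the original sources, and
// excluded from compilation) of the over-allocate-then-align idiom used in
// case 3 of omAlloc() above: request (size + alignment - 1) bytes so that
// some address in the range is guaranteed to be cache-line aligned, then
// round the raw pointer up to that address. The raw pointer is never freed,
// matching the "ObjectMonitors are immortal" policy stated above.
#if 0
static void* example_cache_aligned_alloc(size_t size) {
  size_t padded = size + (DEFAULT_CACHE_LINE_SIZE - 1);   // worst-case slack
  void* raw = (void*)NEW_C_HEAP_ARRAY(char, padded, mtInternal);
  return align_up(raw, DEFAULT_CACHE_LINE_SIZE);          // first aligned addr
}
#endif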

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease() is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
// -- from reclaiming them while we are trying to release them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->header() == NULL, "invariant");
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  m->set_allocation_state(ObjectMonitor::Free);
  // Remove from omInUseList
  if (fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  guarantee(m->is_free(), "invariant");
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before_ the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the deflate_idle_monitors scavenge operator. In particular,
// this ensures that the thread's monitors are scanned by a GC safepoint,
// either via Thread::oops_do() (if safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).
//
// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
// run at the same time as omFlush() so we have to be careful.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away; its per-thread free monitors are
    // freed via set_owner(NULL).
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
    }
    guarantee(tail != NULL, "invariant");
    ADIM_guarantee(Self->omFreeCount == tally, "free-count off");
    Self->omFreeList = NULL;
    Self->omFreeCount = 0;
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    ObjectMonitor *cur_om;
    // The thread is going away; however, the omInUseList inflated
    // monitors may still be in use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
      ADIM_guarantee(cur_om->is_active(), "invariant");
    }
    guarantee(inUseTail != NULL, "invariant");
    ADIM_guarantee(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseList = NULL;
    Self->omInUseCount = 0;
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((tally != 0 || inUseTally != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("omFlush: jt=" INTPTR_FORMAT ", free_monitor_tally=%d"
                 ", in_use_monitor_tally=%d" ", omFreeProvision=%d",
                 p2i(Self), tally, inUseTally, Self->omFreeProvision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}
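
// Editor's note: a sketch (not part of the original sources, and excluded
// from compilation) of a plausible call-site shape for the JFR helper above:
// the event is stack allocated before the work, and posted only if it passed
// its enablement/threshold check. The function name and the elided inflation
// work are hypothetical.
#if 0
static void example_inflate_and_post(oop object,
                                     ObjectSynchronizer::InflateCause cause) {
  EventJavaMonitorInflate event;   // starts the event's timing, if enabled
  // ... perform the inflation work here ...
  if (event.should_commit()) {
    post_monitor_inflate_event(&event, object, cause);
  }
}
#endif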
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle * omh_p, oop obj) {
  while (true) {
    markOop mark = obj->mark();
    if (mark->has_monitor()) {
      if (!omh_p->save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * monitor = omh_p->om_ptr();
      assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
      markOop dmw = monitor->header();
      assert(dmw->is_neutral(), "sanity check: header=" INTPTR_FORMAT, p2i(dmw));
      return;
    }
    inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
    return;
  }
}

void ObjectSynchronizer::inflate(ObjectMonitorHandle * omh_p, Thread * Self,
                                 oop object, const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object
    // *  BIASED       - illegal; we should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      if (!omh_p->save_om_ptr(object, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor * inf = omh_p->om_ptr();
      markOop dmw = inf->header();
      assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      ReadStableMark(object);
      continue;
    }
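    // Editor's sketch (comments only, not compiled): the dispatch this
    // loop implements over the mark states listed above is roughly:
    //
    //   for (;;) {
    //     markOop mark = object->mark();
    //     if (mark->has_monitor())              return the existing monitor;
    //     if (mark == markOopDesc::INFLATING()) { ReadStableMark(object); continue; }
    //     if (mark->has_locker())               inflate over the stack-lock;
    //     else                                  inflate the neutral mark;
    //   }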
    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark->has_locker()) {
      ObjectMonitor * m;
      if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
        // If !AsyncDeflateIdleMonitors or if an internal inflation, then
        // we won't stop for a potential safepoint in omAlloc.
        m = omAlloc(Self, cause);
      } else {
        // If AsyncDeflateIdleMonitors and not an internal inflation, then
        // we may stop for a safepoint in omAlloc() so protect object.
        Handle h_obj(Self, object);
        m = omAlloc(Self, cause);
        object = h_obj();  // Refresh object.
      }
      // Optimistically prepare the objectmonitor - anticipate successful CAS.
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;  // Consider: maintain by type/class

      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;  // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor. The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in progress. This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0, mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.

      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
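      // Editor's sketch (comments only): the three places a header word can
      // live, per the discussion above, and which one this path reads:
      //
      //   (a) object->mark()                      - neutral object
      //   (b) mark->locker()'s displaced header   - stack-locked (this case;
      //       read via mark->displaced_mark_helper() just below)
      //   (c) monitor->header()                   - already inflated
      //
      // While the mark is INFLATING (0), copy (b) is the single stable one.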
      markOop dmw = mark->displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      ADIM_guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));

      // Set up monitor fields to proper values -- prepare the monitor.
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      omh_p->set_om_ptr(m);
      assert(m->is_new(), "freshly allocated monitor must be new");
      m->set_allocation_state(ObjectMonitor::Old);

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(Self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     p2i(object->mark()), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
      return;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    ADIM_guarantee(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(mark));
    ObjectMonitor * m;
    if (!AsyncDeflateIdleMonitors || cause == inflate_cause_vm_internal) {
      // If !AsyncDeflateIdleMonitors or if an internal inflation, then
      // we won't stop for a potential safepoint in omAlloc.
      m = omAlloc(Self, cause);
    } else {
      // If AsyncDeflateIdleMonitors and not an internal inflation, then
      // we may stop for a safepoint in omAlloc() so protect object.
      Handle h_obj(Self, object);
      m = omAlloc(Self, cause);
      object = h_obj();  // Refresh object.
    }
    // Prepare m for installation - set monitor to initial state.
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;  // consider: keep metastats by type/class

    omh_p->set_om_ptr(m);
    assert(m->is_new(), "freshly allocated monitor must be new");
    m->set_allocation_state(ObjectMonitor::Old);

    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
      m->set_header(NULL);
      m->set_object(NULL);
      m->Recycle();
      omh_p->set_om_ptr(NULL);
      // omRelease() will reset the allocation state
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(Self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   p2i(object->mark()), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
    return;
  }
}


// We maintain a list of in-use monitors for each thread.
//
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see omFlush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in use could negatively impact performance. We also want
// to minimize the total number of monitors in circulation, as they incur a
// small footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.

void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* _counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // The per-thread in-use lists are handled in
  // ParallelSPCleanupThreadClosure::do_thread().

  if (!AsyncDeflateIdleMonitors || is_cleanup_requested()) {
    // Use the older mechanism for the global in-use list or
    // if a special cleanup has been requested.
    ObjectSynchronizer::deflate_idle_monitors(_counters);
    return;
  }

  log_debug(monitorinflation)("requesting deflation of idle monitors.");
  // Request deflation of global idle monitors by the ServiceThread:
  _gOmShouldDeflateIdleMonitors = true;
  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  Service_lock->notify_all();
}

// Deflate a single monitor if not in use.
// Returns true if deflated, false if in use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markOop mark = obj->mark();
  guarantee(mark == markOopDesc::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, p2i(mark),
            p2i(markOopDesc::encode(mid)));
  // Make sure that mark->monitor() and markOopDesc::encode() agree:
  guarantee(mark->monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark->monitor()), p2i(mid));
  const markOop dmw = mid->header();
  guarantee(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i(dmw));

  if (mid->is_busy()) {
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return to the global free list -
    // plain old deflation ...
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  p2i(mark), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));
    assert(mid->is_free(), "invariant");

    // Move the object to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Deflate the specified ObjectMonitor if not in use, using a JavaThread.
// Returns true if it was deflated and false otherwise.
//
// The async deflation protocol sets owner to DEFLATER_MARKER and
// makes contentions negative as signals to contending threads that
// an async deflation is in progress. There are a number of checks
// as part of the protocol to make sure that the calling thread has
// not lost the race to a contending thread or to a thread that just
// wants to use the ObjectMonitor*.
//
// The ObjectMonitor has been successfully async deflated when:
// (owner == DEFLATER_MARKER && contentions < 0 && ref_count < 0).
// Contending threads or ObjectMonitor*-using threads that see those
// values know to retry their operation.
//
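// Editor's sketch (not compiled; the accessor names are assumptions made
// for illustration): the success predicate above could be written as
//
//   bool is_async_deflated(ObjectMonitor* m) {
//     return m->owner() == DEFLATER_MARKER &&
//            m->contentions() < 0 &&   // hypothetical accessors for the
//            m->ref_count() < 0;       // _contentions/_ref_count fields
//   }
//
// In the real code these checks are folded into save_om_ptr() and the
// monitor enter paths rather than centralized in one helper.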
bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                  ObjectMonitor** freeHeadp,
                                                  ObjectMonitor** freeTailp) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  // A newly allocated ObjectMonitor should not be seen here so we
  // avoid an endless inflate/deflate cycle.
  assert(mid->is_old(), "must be old: allocation_state=%d",
         (int) mid->allocation_state());

  if (mid->is_busy() || mid->ref_count() != 0) {
    // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor*
    // is in use so no deflation.
    return false;
  }

  if (Atomic::replace_if_null(DEFLATER_MARKER, &(mid->_owner))) {
    // ObjectMonitor is not owned by another thread. Our setting
    // owner to DEFLATER_MARKER forces any contending thread through
    // the slow path. This is just the first part of the async
    // deflation dance.

    if (mid->_waiters != 0 || mid->ref_count() != 0) {
      // Another thread has raced to enter the ObjectMonitor after
      // mid->is_busy() above and has already waited on it which
      // makes it busy so no deflation. Or the ObjectMonitor* is
      // in use for some other operation like inflate(). Restore
      // owner to NULL if it is still DEFLATER_MARKER.
      Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);
      return false;
    }

    if (Atomic::cmpxchg(-max_jint, &mid->_contentions, (jint)0) == 0) {
      // Make contentions negative to force any contending threads to
      // retry. This is the second part of the async deflation dance.

      if (mid->_owner == DEFLATER_MARKER &&
          Atomic::cmpxchg(-max_jint, &mid->_ref_count, (jint)0) == 0) {
        // If owner is still DEFLATER_MARKER, then we have successfully
        // signaled any contending threads to retry. If it is not, then we
        // have lost the race to an entering thread and the ObjectMonitor
        // is now busy. If we cannot make ref_count negative (because the
        // ObjectMonitor* is in use), then we have lost that race instead.
        // This is the third and final part of the async deflation dance.
        // Note: This owner check solves the ABA problem with contentions
        // where another thread acquired the ObjectMonitor, finished
        // using it and restored the contentions to zero.
        // Note: Making ref_count negative solves the race with
        // ObjectMonitor::save_om_ptr() where its ref_count increment
        // happens after the first ref_count check in this function.
        // Note: Making ref_count negative must happen after the third
        // part check of "owner == DEFLATER_MARKER". When save_om_ptr()
        // retries, it will call install_displaced_markword_in_object()
        // which will disconnect the object from the ObjectMonitor so
        // deflation must happen.

        // Sanity checks for the races:
        guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
        guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
                  INTPTR_FORMAT, p2i(mid->_cxq));
        guarantee(mid->_EntryList == NULL,
                  "must be no entering threads: EntryList=" INTPTR_FORMAT,
                  p2i(mid->_EntryList));

        const oop obj = (oop) mid->object();
        if (log_is_enabled(Trace, monitorinflation)) {
          ResourceMark rm;
          log_trace(monitorinflation)("deflate_monitor_using_JT: "
                                      "object=" INTPTR_FORMAT ", mark="
                                      INTPTR_FORMAT ", type='%s'",
                                      p2i(obj), p2i(obj->mark()),
                                      obj->klass()->external_name());
        }

        // Install the old mark word if nobody else has already done it.
        mid->install_displaced_markword_in_object(obj);
        mid->clear_using_JT();

        assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
               p2i(mid->object()));
        assert(mid->is_free(), "must be free: allocation_state=%d",
               (int) mid->allocation_state());

        // Move the deflated ObjectMonitor to the working free list
        // defined by freeHeadp and freeTailp.
        if (*freeHeadp == NULL) {
          // First one on the list.
          *freeHeadp = mid;
        }
        if (*freeTailp != NULL) {
          // We append to the list so the caller can use mid->FreeNext
          // to fix the linkages in its context.
          ObjectMonitor * prevtail = *freeTailp;
          // Should have been cleaned up by the caller:
          assert(prevtail->FreeNext == NULL, "must be NULL: FreeNext="
                 INTPTR_FORMAT, p2i(prevtail->FreeNext));
          prevtail->FreeNext = mid;
        }
        *freeTailp = mid;

        // At this point, mid->FreeNext still refers to its current
        // value and another ObjectMonitor's FreeNext field still
        // refers to this ObjectMonitor. Those linkages have to be
        // cleaned up by the caller who has the complete context.

        // We leave owner == DEFLATER_MARKER and contentions < 0
        // to force any racing threads to retry.
        return true;  // Success, ObjectMonitor has been deflated.
      }

      // The owner was changed from DEFLATER_MARKER, or the ObjectMonitor*
      // is in use, so we lost the race since the ObjectMonitor is now
      // busy.

      // Restore owner to NULL if it is still DEFLATER_MARKER:
      Atomic::cmpxchg((void*)NULL, &mid->_owner, DEFLATER_MARKER);

      // Add back max_jint to restore the contentions field to its
      // proper value (which may not be what we saw above):
      Atomic::add(max_jint, &mid->_contentions);

      assert(mid->_contentions >= 0, "must not be negative: contentions=%d",
             mid->_contentions);
    }

    // The contentions field was no longer 0, so we lost the race since the
    // ObjectMonitor is now busy.
    assert(mid->_owner != DEFLATER_MARKER, "must not be DEFLATER_MARKER");
  }

  // The owner field is no longer NULL, so we lost the race since the
  // ObjectMonitor is now busy.
  return false;
}
// Walk a given monitor list and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock as needed.
//
// In the case of parallel processing of thread-local monitor lists,
// work is done by Threads::parallel_threads_do(), which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids the conflicts that would arise if worker threads
// processed the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // deflate_monitor() succeeded, so
      // extract mid from the in-use list.
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext;  // maintain the current in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the freeHeadp list.
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
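// Editor's note: the unlink step above is the standard singly-linked-list
// splice with a trailing cursor (sketch, not compiled):
//
//   if (mid == *listHeadp)            *listHeadp = mid->FreeNext;          // head
//   else if (cur_mid_in_use != NULL)  cur_mid_in_use->FreeNext = mid->FreeNext;
//   mid->FreeNext = NULL;             // mid is now the freeHeadp-list tail
//
// Because only FreeNext is rewritten, a deflated mid can be handed to the
// working free list without touching the rest of the in-use list.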
// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
// a JavaThread. Returns the number of deflated ObjectMonitors. The given
// list could be a per-thread in-use list or the global in-use list.
// Caller acquires gListLock as appropriate. If a safepoint has started,
// then we save state via savedMidInUsep and return to the caller to
// honor the safepoint.
//
int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** listHeadp,
                                                      ObjectMonitor** freeHeadp,
                                                      ObjectMonitor** freeTailp,
                                                      ObjectMonitor** savedMidInUsep) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");

  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  if (*savedMidInUsep == NULL) {
    // No saved state so start at the beginning.
    mid = *listHeadp;
  } else {
    // We're restarting after a safepoint so restore the necessary state
    // before we resume.
    cur_mid_in_use = *savedMidInUsep;
    mid = cur_mid_in_use->FreeNext;
  }
  while (mid != NULL) {
    // Only try to deflate if there is an associated Java object and if
    // mid is old (is not newly allocated and is not newly freed).
    if (mid->object() != NULL && mid->is_old() &&
        deflate_monitor_using_JT(mid, freeHeadp, freeTailp)) {
      // Deflation succeeded so update the in-use list.
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        // Maintain the current in-use list.
        cur_mid_in_use->FreeNext = mid->FreeNext;
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;
      // At this point mid is disconnected from the in-use list
      // and is the current tail in the freeHeadp list.
      mid = next;
      deflated_count++;
    } else {
      // mid is considered in-use if it does not have an associated
      // Java object or mid is not old or deflation did not succeed.
      // A mid->is_new() node can be seen here when it is freshly
      // returned by omAlloc() (and skips the deflation code path).
      // A mid->is_old() node can be seen here when deflation failed.
      // A mid->is_free() node can be seen here when a fresh node from
      // omAlloc() is released by omRelease() due to losing the race
      // in inflate().

      cur_mid_in_use = mid;
      mid = mid->FreeNext;

      if (SafepointSynchronize::is_synchronizing() &&
          cur_mid_in_use != *listHeadp && cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller so gListLock can be dropped as appropriate
        // before blocking.
        *savedMidInUsep = cur_mid_in_use;
        return deflated_count;
      }
    }
  }
  // We finished the list without a safepoint starting so there's
  // no need to save state.
  *savedMidInUsep = NULL;
  return deflated_count;
}
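// Editor's sketch (not compiled) of the caller-side protocol for the
// savedMidInUsep cooperation above; the real loop is in
// deflate_common_idle_monitors_using_JT() below:
//
//   ObjectMonitor* saved = NULL;
//   do {
//     int n = deflate_monitor_list_using_JT(listHeadp, &freeHeadp,
//                                           &freeTailp, &saved);
//     // ... splice freeHeadp..freeTailp onto gFreeList ...
//     if (saved != NULL) {
//       // Drop gListLock if held, block for the safepoint, reacquire,
//       // then loop; 'saved' is an old, non-head node, which the walker
//       // above deems safe to resume from.
//     }
//   } while (saved != NULL);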
void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->nInuse = 0;              // currently associated with objects
  counters->nInCirculation = 0;      // extant
  counters->nScavenged = 0;          // reclaimed (global and per-thread)
  counters->perThreadScavenged = 0;  // per-thread scavenge total
  counters->perThreadTimes = 0.0;    // per-thread scavenge times
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    // Nothing to do when global idle ObjectMonitors are deflated using
    // a JavaThread unless a special cleanup has been requested.
    if (!is_cleanup_requested()) {
      return;
    }
  }

  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");

  // Note: the thread-local monitor lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan gOmInUseList
  int deflated_count = 0;
  if (gOmInUseList) {
    counters->nInCirculation += gOmInUseCount;
    deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
    gOmInUseCount -= deflated_count;
    counters->nScavenged += deflated_count;
    counters->nInuse += gOmInUseCount;
  }

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // Constant-time list splice - prepend scavenged segment to gFreeList.
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

// Deflate global idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread * self = JavaThread::current();

  _gOmShouldDeflateIdleMonitors = false;

  deflate_common_idle_monitors_using_JT(true /* is_global */, self);
}

// Deflate per-thread idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread * self = JavaThread::current();

  self->omShouldDeflateIdleMonitors = false;

  deflate_common_idle_monitors_using_JT(false /* !is_global */, self);
}
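// Editor's note on the common helper below: the 'is_global' flag mainly
// changes when gListLock is held. A sketch of the locking shape (not
// compiled):
//
//   if (is_global)  Thread::muxAcquire(&gListLock, ...);  // walk under lock
//   ... deflate_monitor_list_using_JT(...) ...
//   if (!is_global) Thread::muxAcquire(&gListLock, ...);  // splice only
//   ... prepend scavenged segment to gFreeList ...
//   if (!is_global) Thread::muxRelease(&gListLock);
//
// The global walk must hold the lock because omFlush() can splice onto
// gOmInUseList at any time; a per-thread list is touched only by its
// owning thread, so the lock is needed just for the gFreeList splice.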
// Deflate global or per-thread idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread * self) {
  int deflated_count = 0;
  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor * freeTailp = NULL;
  ObjectMonitor * savedMidInUsep = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  if (is_global) {
    Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(1)");
    OM_PERFDATA_OP(MonExtant, set_value(gOmInUseCount));
  } else {
    OM_PERFDATA_OP(MonExtant, inc(self->omInUseCount));
  }

  do {
    int local_deflated_count;
    if (is_global) {
      local_deflated_count = deflate_monitor_list_using_JT((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp, &savedMidInUsep);
      gOmInUseCount -= local_deflated_count;
    } else {
      local_deflated_count = deflate_monitor_list_using_JT(self->omInUseList_addr(), &freeHeadp, &freeTailp, &savedMidInUsep);
      self->omInUseCount -= local_deflated_count;
    }
    deflated_count += local_deflated_count;

    if (freeHeadp != NULL) {
      // Move the scavenged ObjectMonitors to the global free list.
      guarantee(freeTailp != NULL && local_deflated_count > 0, "freeTailp=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(freeTailp), local_deflated_count);
      assert(freeTailp->FreeNext == NULL, "invariant");

      if (!is_global) {
        Thread::muxAcquire(&gListLock, "deflate_per_thread_idle_monitors_using_JT(2)");
      }
      // Constant-time list splice - prepend scavenged segment to gFreeList.
      freeTailp->FreeNext = gFreeList;
      gFreeList = freeHeadp;

      gMonitorFreeCount += local_deflated_count;
      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
      if (!is_global) {
        Thread::muxRelease(&gListLock);
      }
    }

    if (savedMidInUsep != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      if (is_global) {
        Thread::muxRelease(&gListLock);
      }
      timer.stop();
      {
        if (is_global) {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        } else {
          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(self));
        }
        assert(SafepointSynchronize::is_synchronizing(), "sanity check");
        ThreadBlockInVM blocker(self);
      }
      // Prepare for another loop after the safepoint.
      freeHeadp = NULL;
      freeTailp = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
      if (is_global) {
        Thread::muxAcquire(&gListLock, "deflate_global_idle_monitors_using_JT(3)");
      }
    }
  } while (savedMidInUsep != NULL);
  if (is_global) {
    Thread::muxRelease(&gListLock);
  }
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    if (is_global) {
      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
    } else {
      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(self), timer.seconds(), deflated_count);
    }
  }
}
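// Editor's note: the safepoint-honoring pause above uses the standard
// HotSpot idiom of entering a scope with ThreadBlockInVM so the JavaThread
// participates in the safepoint, then resuming (sketch, not compiled):
//
//   {
//     ThreadBlockInVM blocker(self);  // thread is safepoint-safe here
//   }                                 // destructor returns to _thread_in_vm
//
// The timer is stopped before and restarted after so the pause is not
// attributed to deflation time.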
void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning-to-end measurement of the phase.
  // Note: AsyncDeflateIdleMonitors only deflates per-thread idle
  // monitors at a safepoint when a special cleanup has been requested.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);

  bool needs_special_cleanup = is_cleanup_requested();
  if (!AsyncDeflateIdleMonitors || needs_special_cleanup) {
    // AsyncDeflateIdleMonitors does not use these counters unless
    // there is a special cleanup request.

    gMonitorFreeCount += counters->nScavenged;

    OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
    OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
                               "gMonitorFreeCount=%d", gMonitorPopulation,
                               gOmInUseCount, gMonitorFreeCount);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0;  // Reset
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
  if (needs_special_cleanup) {
    set_is_cleanup_requested(false);  // special cleanup is done
  }
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    if (!is_cleanup_requested()) {
      // Mark the JavaThread for idle monitor cleanup if a special
      // cleanup has NOT been requested.
      if (thread->omInUseCount > 0) {
        // This JavaThread is using monitors so mark it.
        thread->omShouldDeflateIdleMonitors = true;
      }
      return;
    }
  }

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");

  // Adjust counters
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
  counters->nScavenged += deflated_count;
  counters->nInuse += thread->omInUseCount;
  counters->perThreadScavenged += deflated_count;

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");

    // Constant-time list splice - prepend scavenged segment to gFreeList.
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }

  timer.stop();
  // Safepoint logging cares about cumulative perThreadTimes and
  // we'll capture most of the cost, but not the muxRelease() which
  // should be cheap.
  counters->perThreadTimes += timer.seconds();

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}
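// Editor's note: with AsyncDeflateIdleMonitors and no special cleanup
// request, the safepoint pass above degenerates to a marking step
// (sketch, not compiled):
//
//   if (thread->omInUseCount > 0) {
//     thread->omShouldDeflateIdleMonitors = true;  // handled later by the
//   }                                              // thread itself via
//                                                  // deflate_per_thread_idle_monitors_using_JT()
//
// so the safepoint pays only O(1) per thread instead of walking each
// in-use list.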
// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  return (u_char*)&GVars.hcSequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  return (u_char*)&GVars.stwRandom;
}

void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream * ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  }

  // Log counts for the global and per-thread monitor lists:
  int chkMonitorPopulation = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check gMonitorPopulation:
  if (gMonitorPopulation == chkMonitorPopulation) {
    ls->print_cr("gMonitorPopulation=%d equals chkMonitorPopulation=%d",
                 gMonitorPopulation, chkMonitorPopulation);
  } else {
    ls->print_cr("ERROR: gMonitorPopulation=%d is not equal to "
                 "chkMonitorPopulation=%d", gMonitorPopulation,
                 chkMonitorPopulation);
    error_cnt++;
  }

  // Check gOmInUseList and gOmInUseCount:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check gFreeList and gMonitorFreeCount:
  chk_global_free_list_and_count(ls, &error_cnt);

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check omInUseList and omInUseCount:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check omFreeList and omFreeCount:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls, on_exit);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}
// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n,
                                        outputStream * out, int *error_cnt_p) {
  if ((!AsyncDeflateIdleMonitors && n->is_busy()) ||
      (AsyncDeflateIdleMonitors && n->is_busy_async())) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy.", p2i(jt),
                    p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->header()));
      *error_cnt_p = *error_cnt_p + 1;
    } else if (!AsyncDeflateIdleMonitors) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), p2i(n->header()));
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chkMonitorFreeCount = 0;
  for (ObjectMonitor * n = gFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
    chkMonitorFreeCount++;
  }
  if (gMonitorFreeCount == chkMonitorFreeCount) {
    out->print_cr("gMonitorFreeCount=%d equals chkMonitorFreeCount=%d",
                  gMonitorFreeCount, chkMonitorFreeCount);
  } else {
    out->print_cr("ERROR: gMonitorFreeCount=%d is not equal to "
                  "chkMonitorFreeCount=%d", gMonitorFreeCount,
                  chkMonitorFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (gOmInUseCount == chkOmInUseCount) {
    out->print_cr("gOmInUseCount=%d equals chkOmInUseCount=%d", gOmInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: gOmInUseCount=%d is not equal to chkOmInUseCount=%d",
                  gOmInUseCount, chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = (oop)n->object();
  const markOop mark = obj->mark();
  if (!mark->has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), p2i(mark));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), p2i(mark));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor * const obj_mon = mark->monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), p2i(mark), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chkOmFreeCount = 0;
  for (ObjectMonitor * n = jt->omFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(jt, n, out, error_cnt_p);
    chkOmFreeCount++;
  }
  if (jt->omFreeCount == chkOmFreeCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals "
                  "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not "
                  "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount,
                  chkOmFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(jt, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (jt->omInUseCount == chkOmInUseCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals "
                  "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
                  "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, while 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out,
                                                    bool on_exit) {
  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  }

  if (gOmInUseCount > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %7s %18s %18s",
                  "monitor", "BHL", "ref_cnt", "object", "object type");
    out->print_cr("================== === ======= ================== ==================");
    for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markOop mark = n->header();
      ResourceMark rm;
      out->print_cr(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
                    p2i(n), n->is_busy() != 0, mark->hash() != 0,
                    n->owner() != NULL, (int)n->ref_count(), p2i(obj),
                    obj->klass()->external_name());
    }
  }

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %7s %18s %18s",
                "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
  out->print_cr("================== ================== === ======= ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markOop mark = n->header();
      ResourceMark rm;
      out->print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
                    INTPTR_FORMAT " %s", p2i(jt), p2i(n), n->is_busy() != 0,
                    mark->hash() != 0, n->owner() != NULL, (int)n->ref_count(),
                    p2i(obj), obj->klass()->external_name());
    }
  }

  out->flush();
}
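// Editor's note -- an invented, annotated example of a per-thread row from
// the table above (addresses and counts are illustrative only):
//
//   jt=0x...80  monitor=0x...00  BHL=101  ref_cnt=0  object=0x...10  java.lang.Object
//
// BHL=101 would mean: the monitor is busy (B=1), the object has no cached
// hash code (H=0), and an owner is set (L=1).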
// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int popCount = 0;
  out->print_cr("%18s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Total");
  out->print_cr("================== ========== ========== ==========");
  out->print_cr("%18s %10d %10d %10d", "",
                gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
  popCount += gOmInUseCount + gMonitorFreeCount;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision);
    popCount += jt->omInUseCount + jt->omFreeCount;
  }
  return popCount;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif
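// Editor's sketch (not compiled): the membership test above works because a
// block is one contiguous PaddedEnd<ObjectMonitor> array, so any member's
// byte offset from the block base is an exact multiple of the padded
// element size:
//
//   size_t diff  = (address)monitor - (address)block;
//   bool aligned = (diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0;
//   int  index   = (int)(diff / sizeof(PaddedEnd<ObjectMonitor>));  // 1 .. _BLOCKSIZE-1
//
// Index 0 is excluded because the first element of each block is the
// reserved header carrying the CHAINMARKER and the block-list linkage.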