/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount = 0;   // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
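
// Illustrative caller pattern (a sketch, not code from this file): the
// runtime entry points that invoke the quick_* forms fall back to the
// slow path whenever false is returned, roughly:
//   if (!ObjectSynchronizer::quick_notify(obj, thread, all)) {
//     // transition thread state, then take the slow path, e.g.
//     ObjectSynchronizer::notify(Handle(thread, obj), thread);
//   }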

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor * const mon = mark.monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}
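
// Recap of the mark word states tested throughout this file (see
// markWord.hpp for the authoritative encoding; abridged here for
// orientation, and assuming 64-bit builds):
//   [ptr         | 00]   stack-locked; ptr points to the BasicLock on the
//                        owner's stack
//   [header      | 01]   neutral (unlocked); may carry the identity hash
//   [ptr         | 10]   inflated; ptr points to the ObjectMonitor
//   [JavaThread* | 101]  biased toward the given thread
//   [0]                  INFLATING; transient state while a stack-lock is
//                        being converted to an ObjectMonitor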


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor * const m = mark.monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}
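
// Illustrative summary (a sketch, not additional code): the stack-lock fast
// path in enter() above and the matching exit() below reduce to
//   enter: lock->set_displaced_header(mark);
//          CAS(obj->mark: mark -> lock)          // succeeds if still neutral
//   exit:  CAS(obj->mark: lock -> displaced header)
// and any CAS failure falls through to inflate() and the heavy weight path.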

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to work around deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}
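
// Illustrative context (a sketch; the names come from the JNI spec, not
// this file): jni_enter()/jni_exit() back the JNI MonitorEnter/MonitorExit
// functions, so a native caller such as
//   if (env->MonitorEnter(obj) == JNI_OK) {
//     ... critical section ...
//     env->MonitorExit(obj);
//   }
// always locks via the inflated (heavy weight) monitor.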

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state(false);
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.
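
// Worked layout example (illustrative, assuming DEFAULT_CACHE_LINE_SIZE is
// 64): in the struct below, stwRandom and stwCycle share one padded line
// (8 data bytes + 56 pad bytes) while hcSequence occupies its own line
// (4 data bytes + 60 pad bytes), so frequent writers of hcSequence never
// invalidate the line holding the mostly-read stwRandom/stwCycle pair.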

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markWord ReadStableMark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();     // SMP-polite spinning
    }
  }
}
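
// Worked example of the inflation-lock striping above (the address is
// hypothetical): with NINFLATIONLOCKS == 256, an object at 0x7f0123456780
// maps to stripe ix = (0x7f0123456780 >> 5) & 255 == 0x3c, so waiters on
// unrelated objects usually serialize on different gInflationLocks entries.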

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}
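
// Self-contained restatement of the xor-shift branch above, for
// illustration only (the real state lives in Thread::_hashStateX..W; the
// helper name is hypothetical):
//   static unsigned next_hash_state(unsigned s[4]) {
//     unsigned t = s[0] ^ (s[0] << 11);
//     s[0] = s[1]; s[1] = s[2]; s[2] = s[3];
//     s[3] = (s[3] ^ (s[3] >> 19)) ^ (t ^ (t >> 8));
//     return s[3];
//   }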

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markWord temp, test;
  intptr_t hash;
  markWord mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark.has_bias_pattern(), "invariant");

  if (mark.is_neutral()) {
    hash = mark.hash();               // this is a normal header
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark.copy_set_hash(hash);  // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark.has_monitor()) {
    monitor = mark.monitor();
    temp = monitor->header();
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();
    if (hash != 0) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark.locker())) {
    temp = mark.displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();                  // by current thread, check if the displaced
    if (hash != 0) {                     // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header in the BasicLock on a thread's stack
    // is strictly immutable. It CANNOT be changed in ANY cases.
    // So we have to inflate the stack lock into an ObjectMonitor
    // even if the current thread owns the lock. The BasicLock on
    // a thread's stack can be asynchronously read by other threads
    // during an inflate() call so any change to that stack memory
    // may not propagate to other threads correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check it has a hash code
  mark = monitor->header();
  assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  hash = mark.hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark.copy_set_hash(hash); // merge the hash code into header
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    uintptr_t v = Atomic::cmpxchg(temp.value(), (volatile uintptr_t*)monitor->header_addr(), mark.value());
    test = markWord(v);
    if (test != mark) {
      // The only update to the ObjectMonitor's header/dmw field
      // is to merge in the hash code.
      // If someone adds a new usage of the header/dmw field, please update
      // this code.
      hash = test.hash();
      assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    return thread->is_lock_owned((address)mark.locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = ReadStableMark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark.has_monitor()) {
    void * owner = mark.monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ?
      owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}
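
// Worked example of the threshold check above (illustrative numbers): with
// gMonitorPopulation == 10000 and gMonitorFreeCount == 1500, usage is
// (8500 * 100) / 10000 == 85%, so cleanup is requested only if
// MonitorUsedDeflationThreshold is below 85.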

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object. The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread, and it has a lifespan longer than
    // that of this activation record. The VMThread will delete the op when
    // completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  stringStream ss;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      guarantee(m->object() == NULL, "invariant");
      m->FreeNext = Self->omInUseList;
      Self->omInUseList = m;
      Self->omInUseCount++;
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc(1)");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
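    // A placement-new sketch of that suggestion (illustrative only, not
    // what the code below does):
    //   for (int i = 0; i < _BLOCKSIZE; i++) {
    //     new (&temp[i]) ObjectMonitor();   // runs the ctor at each slot
    //   }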
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc(2)");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() -- from reclaiming them.
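//
// Recap of the resulting list discipline (an illustrative summary, not new
// behavior): omAlloc() pops from omFreeList (refilling it from gFreeList as
// needed) and pushes onto omInUseList; omRelease() performs the inverse, so
// monitors on either free list always satisfy object() == NULL.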

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  stringStream ss;
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
            "%s, recursions=" INTPTR_FORMAT, m->is_busy_to_string(&ss),
            m->_recursions);
  // Remove from omInUseList
  if (fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the deflate_idle_monitors scavenge operator. In particular,
// this ensures that the thread's monitors are scanned by a GC safepoint,
// either via Thread::oops_do() (if safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away. Set 'tail' to the last per-thread free
    // monitor which will be linked to gFreeList below under the gListLock.
    stringStream ss;
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(tail != NULL, "invariant");
    assert(Self->omFreeCount == tally, "free-count off");
    Self->omFreeList = NULL;
    Self->omFreeCount = 0;
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    guarantee(inUseTail != NULL, "invariant");
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseList = NULL;
    Self->omInUseCount = 0;
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream * ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((tally != 0 || inUseTally != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("omFlush: jt=" INTPTR_FORMAT ", free_monitor_tally=%d"
                 ", in_use_monitor_tally=%d" ", omFreeProvision=%d",
                 p2i(Self), tally, inUseTally, Self->omFreeProvision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark();
  if (mark.has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
    assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
    return;
  }
  inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor * inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markWord::INFLATING()) {
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATED appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor. The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress. This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
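
      // Illustrative timeline of the protocol described above (a sketch,
      // not additional states):
      //   [BasicLock* | 00] --CAS--> [0 == INFLATING] --release store--> [ObjectMonitor* | 10]
      // Concurrent readers that observe 0 spin in ReadStableMark() until
      // the final store publishes the monitor.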

      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark.locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      object->release_set_mark(markWord::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(Self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from enter() would be useful.
    // (See the illustrative sketch that follows this function.)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = omAlloc(Self);
    // Prepare m for installation - set monitor to initial state.
    m->Recycle();
    m->set_header(mark);
    m->set_object(object);
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(Self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}
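
// A minimal sketch of the inflateTry() idea mentioned in the CASE: neutral
// comment inside inflate() above.  This is illustrative only -- inflateTry()
// is not an existing method -- and it assumes the same omAlloc()/omRelease()
// helpers and mark-word encoding used by inflate():
//
//   ObjectMonitor* ObjectSynchronizer::inflateTry(Thread* Self, oop object) {
//     const markWord mark = object->mark();
//     if (!mark.is_neutral()) return NULL;  // caller falls back to inflate()
//     ObjectMonitor* m = omAlloc(Self);
//     m->Recycle();
//     m->set_header(mark);
//     m->set_owner(Self);        // pre-locked: a successful CAS both
//     m->set_object(object);     // inflates and confers ownership
//     if (object->cas_set_mark(markWord::encode(m), mark) == mark) {
//       return m;                // caller now owns the monitor
//     }
//     m->set_header(markWord::zero());  // interference: undo and release
//     m->set_owner(NULL);
//     m->set_object(NULL);
//     m->Recycle();
//     omRelease(Self, m, true);
//     return NULL;
//   }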

// We maintain a list of in-use monitors for each thread.
//
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see omFlush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved.  Collectively they traverse
// the population of in-use monitors, deflating where possible.  The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.  Having a large
// number of monitors in-use could negatively impact performance.  We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.

// Deflate a single monitor if it is not in-use.
// Returns true if deflated, false if in-use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markWord mark = obj->mark();
  guarantee(mark == markWord::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
            markWord::encode(mid).value());
  // Make sure that mark.monitor() and markWord::encode() agree:
  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  const markWord dmw = mid->header();
  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

  if (mid->is_busy()) {
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return it to the global free list --
    // plain old deflation.
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  mark.value(), obj->klass()->external_name());
    }

    // Restore the header back to obj.
    obj->release_set_mark(dmw);
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));

    // Move the deflated ObjectMonitor to the working free list
    // defined by freeHeadp and freeTailp.
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor* prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock as needed.
//
// In the case of parallel processing of thread-local monitor lists,
// work is done by Threads::parallel_java_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise if worker threads processed
// the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
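
// A small worked example of the list surgery deflate_monitor_list() performs
// below.  Suppose a thread's in-use list is A -> B -> C and only B is idle:
//
//   mid = A: deflate_monitor() fails (busy), so cur_mid_in_use = A; advance.
//   mid = B: deflate_monitor() succeeds, so B is unlinked:
//              cur_mid_in_use->FreeNext = B->FreeNext;   // now A -> C
//            B->FreeNext is NULLed and B becomes the tail of the working
//            free list (freeHeadp/freeTailp), which the caller later splices
//            onto gFreeList in one step.
//   mid = C: deflate_monitor() fails (busy), so C stays on the in-use list.
//
// The in-use list ends up as A -> C and deflated_count == 1.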
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // deflate_monitor() succeeded, so extract mid from the in-use list.
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext;  // maintain the in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the freeHeadp list.
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->nInuse = 0;              // currently associated with objects
  counters->nInCirculation = 0;      // extant
  counters->nScavenged = 0;          // reclaimed (global and per-thread)
  counters->perThreadScavenged = 0;  // per-thread scavenge total
  counters->perThreadTimes = 0.0;    // per-thread scavenge times
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ObjectMonitor* freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Prevent omFlush() from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "deflate_idle_monitors");

  // Note: the thread-local monitor lists get deflated in
  // a separate pass.  See deflate_thread_local_monitors().

  // For moribund threads, scan gOmInUseList:
  int deflated_count = 0;
  if (gOmInUseList) {
    counters->nInCirculation += gOmInUseCount;
    deflated_count = deflate_monitor_list((ObjectMonitor**)&gOmInUseList, &freeHeadp, &freeTailp);
    gOmInUseCount -= deflated_count;
    counters->nScavenged += deflated_count;
    counters->nInuse += gOmInUseCount;
  }
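
  // The working free list built above is spliced onto gFreeList below in
  // constant time: only the tail's FreeNext and the list head are written.
  // For example, if the scavenged segment is F1 -> F2 -> F3 and gFreeList
  // is G1 -> G2, then after the splice gFreeList is F1 -> F2 -> F3 -> G1 -> G2.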

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // Constant-time list splice - prepend scavenged segment to gFreeList.
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors.  Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning-to-end measurement of the phase.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged);

  gMonitorFreeCount += counters->nScavenged;

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    Thread::muxAcquire(&gListLock, "finish_deflate_idle_monitors");
    log_info(monitorinflation)("gMonitorPopulation=%d, gOmInUseCount=%d, "
                               "gMonitorFreeCount=%d", gMonitorPopulation,
                               gOmInUseCount, gMonitorFreeCount);
    Thread::muxRelease(&gListLock);
  }

  ForceMonitorScavenge = 0;    // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));

  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ObjectMonitor* freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* freeTailp = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors");

  // Adjust counters:
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
  counters->nScavenged += deflated_count;
  counters->nInuse += thread->omInUseCount;
  counters->perThreadScavenged += deflated_count;

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");

    // Constant-time list splice - prepend scavenged segment to gFreeList.
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }

  timer.stop();
  // Safepoint logging cares about cumulative perThreadTimes and
  // we'll capture most of the cost, but not the muxRelease() which
  // should be cheap.
  counters->perThreadTimes += timer.seconds();

  Thread::muxRelease(&gListLock);

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates whether
// the thread called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
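
// To spell out the idiom above: _safepoint_counter is incremented once when
// a safepoint begins and once when it ends, so it is odd exactly while a
// safepoint is in progress.  (tmp & 1) != 0 means the code *started* inside
// a safepoint, and (tmp ^ _safepoint_counter) != 0 means a safepoint began
// (and possibly ended) while the guarded code ran; OR-ing the two terms makes
// the guarantee fire in either case.  A minimal sketch, assuming the counter
// is accessible at the use site:
//
//   uint64_t tmp = SafepointSynchronize::_safepoint_counter;
//   // ... code that must not run at a safepoint ...
//   guarantee(((tmp ^ SafepointSynchronize::_safepoint_counter) | (tmp & 1)) == 0,
//             "code unexpectedly overlapped a safepoint");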

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  return (u_char*)&GVars.hcSequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  return (u_char*)&GVars.stwRandom;
}

void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "audit_and_print_stats");
  }

  // Log counts for the global and per-thread monitor lists:
  int chkMonitorPopulation = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check gMonitorPopulation:
  if (gMonitorPopulation == chkMonitorPopulation) {
    ls->print_cr("gMonitorPopulation=%d equals chkMonitorPopulation=%d",
                 gMonitorPopulation, chkMonitorPopulation);
  } else {
    ls->print_cr("ERROR: gMonitorPopulation=%d is not equal to "
                 "chkMonitorPopulation=%d", gMonitorPopulation,
                 chkMonitorPopulation);
    error_cnt++;
  }

  // Check gOmInUseList and gOmInUseCount:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check gFreeList and gMonitorFreeCount:
  chk_global_free_list_and_count(ls, &error_cnt);

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check omInUseList and omInUseCount:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check omFreeList and omFreeCount:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level.  When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls, on_exit);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}
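
// Note that the audit above is driven entirely by Unified Logging selections
// on the monitorinflation tag; e.g. running with
//   -Xlog:monitorinflation=debug
// makes finish_deflate_idle_monitors() invoke the full audit at each
// deflation cycle, while
//   -Xlog:monitorinflation=trace
// additionally dumps per-monitor details via log_in_use_monitor_details().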

// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream* out, int* error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream* out,
                                                        int* error_cnt_p) {
  int chkMonitorFreeCount = 0;
  for (ObjectMonitor* n = gFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(NULL /* jt */, n, out, error_cnt_p);
    chkMonitorFreeCount++;
  }
  if (gMonitorFreeCount == chkMonitorFreeCount) {
    out->print_cr("gMonitorFreeCount=%d equals chkMonitorFreeCount=%d",
                  gMonitorFreeCount, chkMonitorFreeCount);
  } else {
    out->print_cr("ERROR: gMonitorFreeCount=%d is not equal to "
                  "chkMonitorFreeCount=%d", gMonitorFreeCount,
                  chkMonitorFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream* out,
                                                          int* error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor* n = gOmInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (gOmInUseCount == chkOmInUseCount) {
    out->print_cr("gOmInUseCount=%d equals chkOmInUseCount=%d", gOmInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: gOmInUseCount=%d is not equal to chkOmInUseCount=%d",
                  gOmInUseCount, chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream* out, int* error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
    // The remaining checks dereference the associated object, so there is
    // nothing more we can safely check here.
    return;
  }
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread* jt,
                                                            outputStream* out,
                                                            int* error_cnt_p) {
  int chkOmFreeCount = 0;
  for (ObjectMonitor* n = jt->omFreeList; n != NULL; n = n->FreeNext) {
    chk_free_entry(jt, n, out, error_cnt_p);
    chkOmFreeCount++;
  }
  if (jt->omFreeCount == chkOmFreeCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals "
                  "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not "
                  "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount,
                  chkOmFreeCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread* jt,
                                                              outputStream* out,
                                                              int* error_cnt_p) {
  int chkOmInUseCount = 0;
  for (ObjectMonitor* n = jt->omInUseList; n != NULL; n = n->FreeNext) {
    chk_in_use_entry(jt, n, out, error_cnt_p);
    chkOmInUseCount++;
  }
  if (jt->omInUseCount == chkOmInUseCount) {
    out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals "
                  "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not "
                  "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount,
                  chkOmInUseCount);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists.  The 'BHL'
// flags indicate why the entry is in-use: 'B' -> is_busy, 'H' -> has a
// hash code, 'L' -> has an owner (lock status); 'object' and 'object type'
// identify the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream* out,
                                                    bool on_exit) {
  if (!on_exit) {
    // Not at VM exit so grab the global list lock.
    Thread::muxAcquire(&gListLock, "log_in_use_monitor_details");
  }

  stringStream ss;
  if (gOmInUseCount > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");
    for (ObjectMonitor* n = gOmInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markWord mark = n->header();
      ResourceMark rm;
      out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n),
                 n->is_busy() != 0, mark.hash() != 0, n->owner() != NULL,
                 p2i(obj), obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  if (!on_exit) {
    Thread::muxRelease(&gListLock);
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %18s %18s",
                "jt", "monitor", "BHL", "object", "object type");
  out->print_cr("================== ================== === ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    for (ObjectMonitor* n = jt->omInUseList; n != NULL; n = n->FreeNext) {
      const oop obj = (oop) n->object();
      const markWord mark = n->header();
      ResourceMark rm;
      out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
                 " %s", p2i(jt), p2i(n), n->is_busy() != 0,
                 mark.hash() != 0, n->owner() != NULL, p2i(obj),
                 obj->klass()->external_name());
      if (n->is_busy() != 0) {
        out->print(" (%s)", n->is_busy_to_string(&ss));
        ss.reset();
      }
      out->cr();
    }
  }

  out->flush();
}
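
// For example (all values illustrative), a global in-use line printed by
// log_in_use_monitor_details() might look like:
//
//   0x00007f30d4107b80 101 0x00000000e1234567 java.lang.Object (...)
//
// i.e. the monitor is busy (B=1), its header caches no hash code (H=0), and
// it has an owner (L=1); the trailing parenthesized summary comes from
// is_busy_to_string().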

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream* out) {
  int popCount = 0;
  out->print_cr("%18s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Total");
  out->print_cr("================== ========== ========== ==========");
  out->print_cr("%18s %10d %10d %10d", "",
                gOmInUseCount, gMonitorFreeCount, gMonitorPopulation);
  popCount += gOmInUseCount + gMonitorFreeCount;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision);
    popCount += jt->omInUseCount + jt->omFreeCount;
  }
  return popCount;
}

#ifndef PRODUCT

// Check if the monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor* monitor) {
  PaddedEnd<ObjectMonitor>* block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor>*)block->FreeNext;
  }
  return 0;
}

#endif
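
// A worked example of the membership test above (the size is illustrative):
// if a block starts at blk and sizeof(PaddedEnd<ObjectMonitor>) == 256, then
// monitors at blk + 256, blk + 512, ..., blk + (_BLOCKSIZE - 1) * 256 are in
// the pool.  Element 0 is the block header (its object is CHAINMARKER), which
// is why the range check is exclusive at &block[0].  The alignment assert
// catches pointers that land inside a block but not on an element boundary.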