/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define NOINLINE __attribute__((noinline))
#else
  #define NOINLINE
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden.  Generally the thread_state() is _in_Java upon
// entry.
//
// An interesting optimization is to have the JIT recognize the following
// common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
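// A minimal sketch of the fused primitive suggested above. The entry point
// quick_notifyAndExit() is hypothetical (it does not exist in this file);
// the sketch simply combines the existing quick_notify() fast-path with a
// fast unlock, reverting to the slow-path if the notify cannot be satisfied:
//
//   bool ObjectSynchronizer::quick_notifyAndExit(oopDesc* obj, Thread* Self,
//                                                BasicLock* lock, bool All) {
//     if (!quick_notify(obj, Self, All)) return false;  // slow-path needed
//     fast_exit(obj, lock, Self);                       // then monitorexit
//     return true;
//   }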

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * Self, bool All) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  No_Safepoint_Verifier nsv;
  if (obj == NULL) return false;
  const markOop mark = obj->mark();

  if (mark->has_locker() && Self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != Self) return false;

    // As long as the object is unbiased and doesn't require safepoint revocation
    // and is owned by the caller we can transfer a thread or threads from
    // the waitset to the entrylist here and now, avoiding the slow-path.
    // That is, the only case where the slow-path is mandatory is
    // when the object is biased or we need to throw IMSX exceptions.
    if (mon->first_waiter() != NULL) {
      if (All) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, Self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, Self);
      }
      int tally = 0;
      for (;;) {
        if (mon->first_waiter() == NULL) break;
        mon->INotify(Self);
        ++tally;
        if (!All) break;
      }
      if (ObjectMonitor::_sync_Notifications != NULL) {
        ObjectMonitor::_sync_Notifications->inc(tally);
      }
    }
    return true;
  }

  return false;  // revert to slow-path
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * Lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  No_Safepoint_Verifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if displaced header is null, the previous enter is recursive enter, no-op
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code. Simply using the
// heavy-weight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
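// A minimal sketch of the 5-step nested-lock protocol described above,
// assuming hypothetical handles lock1/lock2 and a caller-owned BasicLock
// lock2_basic (none of these names appear in this file); the notify
// partner for step 3 is elided:
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);    // 1) drop lock1, save count
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                         // 2) wait on lock2
//   ObjectSynchronizer::slow_exit(lock2(), &lock2_basic, THREAD);       // 3) unlock lock2 once notified
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                    // 4) reenter lock1 w/ saved count
//   ObjectSynchronizer::fast_enter(lock2, &lock2_basic, false, THREAD); // 5) lock lock2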
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.
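// A worked example of the padding arithmetic in the struct below, assuming
// DEFAULT_CACHE_LINE_SIZE == 64 (the actual value is platform-defined):
// _pad_prefix keeps the first hot field off the allocator's preceding line;
// stwRandom and stwCycle together occupy sizeof(volatile int) * 2 == 8 bytes,
// so DEFINE_PAD_MINUS_SIZE(1, 64, 8) emits 56 filler bytes, pushing
// hcSequence onto its own cache line; the trailing pad keeps whatever is
// allocated after GVars from sharing hcSequence's line.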

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG output.
    // On an MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}
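// A minimal standalone sketch of the xor-shift scheme used in the default
// branch above; the reference-parameter state stands in for the per-thread
// _hashStateX.._hashStateW fields, and the function name is hypothetical:
//
//   static unsigned xorshift128(unsigned& x, unsigned& y, unsigned& z, unsigned& w) {
//     unsigned t = x;
//     t ^= (t << 11);
//     x = y;  y = z;  z = w;
//     w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//     return w;
//   }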

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other
    // threads correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ?
      owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (PaddedEnd<ObjectMonitor> * block =
       (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
       block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but the
// rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop.  In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout);
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and have a lifespan longer than
    // that of this activation record. The VMThread will delete the op when completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout);
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}
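// A minimal pseudo-code sketch of the three-tier allocation strategy that
// omAlloc() below implements; the helper names (pop, refill_local_from_global,
// allocate_block_and_publish) are hypothetical stand-ins for inline code:
//
//   ObjectMonitor* omAlloc(Thread* Self) {
//     for (;;) {
//       if (Self->omFreeList != NULL) return pop(Self->omFreeList); // 1: thread-local list
//       if (gFreeList != NULL) {                                    // 2: bulk-refill the local
//         refill_local_from_global();                               //    list under gListLock,
//         continue;                                                 //    then retry
//       }
//       allocate_block_and_publish();                               // 3: malloc a new block and
//     }                                                             //    splice it into gFreeList
//   }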

ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its successor,
    // forming the singly linked free list; the very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    gBlockList = temp;

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after_ the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there would be
// no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
                                                     oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL ... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct
      // cache lines to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
      TEVENT(Inflate: overwrite stacklock);
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                        (void *) object, (intptr_t) object->mark(),
                        object->klass()->external_name());
        }
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and
    // slow_enter() would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
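    // (The ObjectMonitor blocks themselves are padded -- note the
    // PaddedEnd<ObjectMonitor> casts in deflate_idle_monitors() and
    // verify() below -- for the same false-sharing reason.)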
    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
    TEVENT(Inflate: overwrite neutral);
    if (TraceMonitorInflation) {
      if (object->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) object, (intptr_t) object->mark(),
                      object->klass()->external_name());
      }
    }
    return m;
  }
}


// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// then only scans the per-thread in-use lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds that thread's list of active
// monitors to the global gOmInUseList, acquiring the global list lock.
// deflate_idle_monitors() likewise acquires the global list lock when
// moving non-busy monitors from gOmInUseList to the global free list.
// An alternative would have been to use a single global in-use list.
// The downside would have been the additional cost of acquiring the
// global list lock for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW   = 0,
  MaximumRecheckInterval  = 1000
};

// Deflate a single monitor if it is not in use.
// Returns true if deflated, false if in use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // The monitor is no longer being used -- it's idle.
    // Scavenge it and return it to the global free list:
    // plain old deflation ...
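    // Deflation proper: (1) restore the cached header to the object,
    // (2) sever the monitor<->object association, and (3) append the
    // monitor to the caller's working free list (freeHeadp/freeTailp).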
    TEVENT(deflate_idle_monitors - scavenge1);
    if (TraceMonitorInflation) {
      if (obj->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the monitor to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // deflate_monitor succeeded, so
      // extract mid from the in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // mid is now the tail of the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;          // currently associated with objects
  int nInCirculation = 0;  // extant
  int nScavenged = 0;      // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else for (PaddedEnd<ObjectMonitor> * block =
              (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
              block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE;
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor*)&block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should either be on a thread-specific private
        // free list or on the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee(!mid->is_busy(), "invariant");
        continue;
      }
      deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

      if (deflated) {
        mid->FreeNext = NULL;
        nScavenged++;
      } else {
        nInuse++;
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and the list agree.

  if (ObjectMonitor::Knob_Verbose) {
    ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
             nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
             gMonitorPopulation, gMonitorFreeCount);
    ::fflush(stdout);
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
  if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's
// monitors. Gives up on a particular monitor if an exception occurs, but
// continues the overall iteration, swallowing the exception.
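// Note: complete_exit() releases the monitor in a single step even when it
// is held recursively (returning the saved recursion count), so the closure
// does not need to loop per monitor.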
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates whether
// the thread called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size, so
    // do some cache-line-specific sanity checks.

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
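// ("Weak" in the sense that only per-monitor invariants are checked, via
// ObjectMonitor::verify(); no attempt is made to cross-check the free and
// in-use lists or the population counts.)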
void ObjectSynchronizer::verify() {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Check if the monitor belongs to the monitor cache.
// The list is grow-only, so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;

  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address) monitor;
      address blk = (address) block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
  return 0;
}

#endif