/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks[NINFLATIONLOCKS];

// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0;      // protects global monitor free-list cache
static volatile int MonitorFreeCount = 0;   // # on gFreeList
static volatile int MonitorPopulation = 0;  // # Extant -- in circulation
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.

// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.
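// A hypothetical caller-side sketch (for illustration only -- the real call
// sites are emitted by the interpreter and the JIT compilers, not written
// out in C++ like this):
//
//   if (!ObjectSynchronizer::quick_enter(obj, Self, lock)) {
//     // fast path declined: transition thread state, then take the slow path
//     ObjectSynchronizer::fast_enter(h_obj, lock, attempt_rebias, THREAD);
//   }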
bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * Lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  No_Safepoint_Verifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if the displaced header is null, the previous enter was a recursive enter: no-op
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
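  // The test below holds exactly when the object's mark word still contains
  // the address of this BasicLock, i.e., the object is stack-locked by this
  // particular box on the current thread's stack.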
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy
// weight monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock,
// i.e. to give up an outer lock completely and then re-enter.
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
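  // A rough sketch of the contract assumed of check() (see objectMonitor.cpp):
  // it returns true when the calling thread owns the monitor, and otherwise
  // posts an IllegalMonitorStateException and returns false, in which case
  // we must not call exit().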
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable.
// This store was unnecessary for correctness. Many threads storing into
// a common location causes considerable cache migration or "sloshing" on
// large SMP systems. As such, I avoided using OrderAccess::storestore().
// In some cases OrderAccess::fence() -- which incurs local latency on the
// executing processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflation lock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().
        // When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(InflationLocks + ix, "InflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(InflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation:
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same
    // RNG output. On MP systems we'll have lots of RW access to a global,
    // so the mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
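    // Background note (not from the original comment): this is Marsaglia's
    // xorshift generator with four 32-bit state words per thread
    // (_hashStateX.._hashStateW). Assuming those words were seeded to a
    // non-zero state at thread creation, the sequence has a very long
    // period and, because the state is thread-local, it generates no
    // coherency traffic, unlike the hashCode == 0 variant above.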
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case.
    // So we have to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason is that
    // the BasicLock (stack slot) will be asynchronously read by
    // other threads during the inflate() function. Any change to
    // the stack may not propagate to other threads correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check it for a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method could revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
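  // For the stack-locked case the mark word holds an address within some
  // thread's stack; is_lock_owned() essentially tests whether that address
  // falls within the current thread's own stack, which decides owner_self
  // vs owner_other below.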
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}
// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (PaddedEnd<ObjectMonitor> * block =
       (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
       block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object. The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge);
      ::fflush(stdout);
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and have a lifespan longer than
    // that of this activation record. The VMThread will delete the op when
    // completed.
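    // Note: as stated above, the operation is asynchronous -- execute()
    // enqueues the op and returns without waiting for the safepoint itself,
    // so this call does not block the requesting thread.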
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      ::printf("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge);
      ::fflush(stdout);
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int inusetally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    inusetally++;
  }
  assert(inusetally == Self->omInUseCount, "in use count off");

  int freetally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    freetally++;
  }
  assert(freetally == Self->omFreeCount, "free count off");
}

ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the ListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&ListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        MonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&ListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list, while the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the ListLock to manipulate BlockList and FreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&ListLock, "omAlloc [2]");
    MonitorPopulation += _BLOCKSIZE-1;
    MonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    gBlockList = temp;

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&ListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.
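// An illustrative (hypothetical, never called) check of that constraint:
//
//   static void verify_free_list_unassigned(ObjectMonitor* list) {
//     for (ObjectMonitor* m = list; m != NULL; m = m->FreeNext) {
//       assert(m->object() == NULL, "free monitor must not reference an object");
//     }
//   }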

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* curmidinuse = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; curmidinuse = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use-list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (curmidinuse != NULL) {
          curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator. Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * Tail = NULL;
  int Tally = 0;
  if (List != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to Tail, which will be linked into the global free list
    // gFreeList below, under the ListLock
    for (s = List; s != NULL; s = s->FreeNext) {
      Tally++;
      Tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(Tail != NULL && List != NULL, "invariant");
  }

  ObjectMonitor * InUseList = Self->omInUseList;
  ObjectMonitor * InUseTail = NULL;
  int InUseTally = 0;
  if (InUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *curom;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in use by other threads.
    // Link them to InUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the ListLock
    for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
      InUseTail = curom;
      InUseTally++;
    }
    assert(Self->omInUseCount == InUseTally, "in use count off");
    Self->omInUseCount = 0;
    guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&ListLock, "omFlush");
  if (Tail != NULL) {
    Tail->FreeNext = gFreeList;
    gFreeList = List;
    MonitorFreeCount += Tally;
  }

  if (InUseTail != NULL) {
    InUseTail->FreeNext = gOmInUseList;
    gOmInUseList = InUseList;
    gOmInUseCount += InUseTally;
  }

  Thread::muxRelease(&ListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
                                                     oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible = NULL;
      m->_recursions = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor. The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress. This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
      TEVENT(Inflate: overwrite stacklock);
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                        (void *) object, (intptr_t) object->mark(),
                        object->klass()->external_name());
        }
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions = 0;
    m->_Responsible = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
    TEVENT(Inflate: overwrite neutral);
    if (TraceMonitorInflation) {
      if (object->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) object, (intptr_t) object->mark(),
                      object->klass()->external_name());
      }
    }
    return m;
  }
}


// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.


// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and return non-busy monitors to
// the global free list.
// An alternative would have been to use a single global in-use list.  The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW = 0,
  MaximumRecheckInterval = 1000
};

// Deflate a single monitor if not in use
// Return true if deflated, false if in use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (TraceMonitorInflation) {
      if (obj->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move mid to the working free list defined by freeHeadp and freeTailp.
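    // The working list is a simple singly-linked list threaded through
    // FreeNext; deflated monitors are appended at the tail in constant time.
    // For example, after deflating monitors A and then B the list is
    //   *freeHeadp -> A -> B == *freeTailp, with B->FreeNext == NULL.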
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors
// The given list could be a per-thread list or a global list
// Caller acquires ListLock
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listheadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* curmidinuse = NULL;
  int deflatedcount = 0;

  for (mid = *listheadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract mid from the per-thread in-use list
      if (mid == *listheadp) {
        *listheadp = mid->FreeNext;
      } else if (curmidinuse != NULL) {
        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the freeHeadp list
      mid = next;
      deflatedcount++;
    } else {
      curmidinuse = mid;
      mid = mid->FreeNext;
    }
  }
  return deflatedcount;
}

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;          // currently associated with objects
  int nInCirculation = 0;  // extant
  int nScavenged = 0;      // reclaimed
  bool deflated = false;

  ObjectMonitor * FreeHead = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * FreeTail = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749
  Thread::muxAcquire(&ListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflatedcount = deflate_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
      cur->omInUseCount -= deflatedcount;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflatedcount;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflatedcount = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
      gOmInUseCount -= deflatedcount;
      nScavenged += deflatedcount;
      nInuse += gOmInUseCount;
    }

  } else for (PaddedEnd<ObjectMonitor> * block =
              (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
              block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE;
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor*)&block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should be on either a thread-specific private
        // free list or the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee(!mid->is_busy(), "invariant");
        continue;
      }
      deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);

      if (deflated) {
        mid->FreeNext = NULL;
        nScavenged++;
      } else {
        nInuse++;
      }
    }
  }

  MonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
             nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
             MonitorPopulation, MonitorFreeCount);
    ::fflush(stdout);
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (FreeHead != NULL) {
    guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
    assert(FreeTail->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    FreeTail->FreeNext = gFreeList;
    gFreeList = FreeHead;
  }
  Thread::muxRelease(&ListLock);

  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
  if (ObjectMonitor::_sync_MonExtant != NULL) ObjectMonitor::_sync_MonExtant->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates whether
// the thread called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
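//
// Packaged as a scoped checker, that idiom might look like the following
// sketch (hypothetical; NoSafepointCheck is not an existing class, and it
// assumes _safepoint_counter is odd exactly while a safepoint is in progress):
//
//   class NoSafepointCheck {
//     int _saved;
//    public:
//     NoSafepointCheck() : _saved(SafepointSynchronize::_safepoint_counter) {}
//     ~NoSafepointCheck() {
//       int now = SafepointSynchronize::_safepoint_counter;
//       // Fails if the counter advanced (a safepoint completed) or if a
//       // safepoint was already in progress when we started (odd counter).
//       guarantee(((_saved ^ now) | (_saved & 1)) == 0, "safepoint occurred");
//     }
//   };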

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&ListLock);
  THREAD->clear_pending_exception();
}

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size so
    // do some cache line specific sanity checks

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line, which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line, which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line, which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Check if a monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.
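// A monitor lies within a given block iff its address falls strictly between
// the block header (&block[0], which holds the CHAINMARKER) and
// one-past-the-end (&block[_BLOCKSIZE]).  The assert below additionally
// checks that the address is a whole number of PaddedEnd<ObjectMonitor>
// slots from the block base, i.e. mon == blk + i * sizeof(PaddedEnd<ObjectMonitor>)
// for some integer i in [1, _BLOCKSIZE - 1].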

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;

  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address) monitor;
      address blk = (address) block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
  return 0;
}

#endif