/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define ATTR __attribute__((noinline))
#else
#define ATTR
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
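//
// Reader's note (added commentary, not part of the probe contract): the
// COMMON macro below materializes the data every monitor probe consumes --
// the Java thread id plus the locked object's class name as a (bytes, len)
// pair -- so a firing HOTSPOT_MONITOR_WAIT receives roughly
//   HOTSPOT_MONITOR_WAIT(jtid, (uintptr_t) monitor, bytes, len, millis)
// with bytes/len left as NULL/0 when the klass name is unavailable.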

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                           (uintptr_t)(monitor), bytes, len);              \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks [NINFLATIONLOCKS] ;

ObjectMonitor * ObjectSynchronizer::gBlockList = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL ;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL ;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0 ;  // # on gFreeList
static volatile int MonitorPopulation = 0 ;  // # Extant -- in circulation
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter.  The interpreter and compiler use
// assembly copies of this code; make sure to update that code if the
// following function is changed.  The implementation is extremely
// sensitive to race conditions.  Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter (obj, lock, THREAD) ;
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if displaced header is null, the previous enter is recursive enter, no-op
  markOop dhw = lock->displaced_header();
  markOop mark ;
  if (dhw == NULL) {
     // Recursive stack-lock.
     // Diagnostics -- Could be: stack-locked, inflating, inflated.
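     // Reader's note (added): the NULL displaced header is installed by the
     // recursive case in slow_enter(), which stores NULL into the inner
     // BasicLock when the current thread already owns the stack-lock.
     // Everything below this point in the dhw == NULL arm is a sanity
     // check only; the exit itself is a no-op.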
     mark = object->mark() ;
     assert (!mark->is_neutral(), "invariant") ;
     if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
        assert(THREAD->is_lock_owned((address)mark->locker()), "invariant") ;
     }
     if (mark->has_monitor()) {
        ObjectMonitor * m = mark->monitor() ;
        assert(((oop)(m->object()))->mark() == mark, "invariant") ;
        assert(m->is_entered(THREAD), "invariant") ;
     }
     return ;
  }

  mark = object->mark() ;

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
     assert (dhw->is_neutral(), "invariant") ;
     if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
        TEVENT (fast_exit: release stacklock) ;
        return;
     }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT (slow_enter: release stacklock) ;
      return ;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header (NULL) ;
    return ;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.  Simply using the heavy-
// weight monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit (object, lock, THREAD) ;
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock,
// i.e. to give up an outer lock completely and then re-enter.
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT (complete_exit) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT (reenter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT (jni_enter) ;
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}


// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT (jni_exit) ;
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
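  // Reader's note (added): check(THREAD) is expected to return true only
  // when the calling thread owns the monitor, and to post
  // IllegalMonitorStateException otherwise -- which is why a plain boolean
  // test is used here instead of the CHECK macro.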
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT (ObjectLocker) ;

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT (wait - throw IAX) ;
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD) ;
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable.  This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems.  As such, I avoid using OrderAccess::storestore()
// until it's repaired.  In some cases OrderAccess::fence() -- which incurs local
// latency on the executing processor -- is a better choice as it scales on SMP
// systems.  See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
// discussion of coherency costs.  Note that all our current reference platforms
// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering performed
// by the CPU(s) or platform.

struct SharedGlobals {
    // These are highly shared mostly-read variables.
    // To avoid false-sharing they need to be the sole occupants of a $ line.
    double padPrefix [8];
    volatile int stwRandom ;
    volatile int stwCycle ;

    // Hot RW variables -- Sequester to avoid false-sharing
    double padSuffix [16];
    volatile int hcSequence ;
    double padFinal [8] ;
} ;

static SharedGlobals GVars ;
static int MonitorScavengeThreshold = 1000000 ;
static volatile int ForceMonitorScavenge = 0 ; // Scavenge required and pending

static markOop ReadStableMark (oop obj) {
  markOop mark = obj->mark() ;
  if (!mark->is_being_inflated()) {
    return mark ;       // normal fast-path return
  }

  int its = 0 ;
  for (;;) {
    markOop mark = obj->mark() ;
    if (!mark->is_being_inflated()) {
      return mark ;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its ;
    if (its > 10000 || !os::is_MP()) {
       if (its & 1) {
         os::NakedYield() ;
         TEVENT (Inflate: INFLATING - yield) ;
       } else {
         // Note that the following code attenuates the livelock problem but is not
         // a complete remedy.  A more complete solution would require that the inflating
         // thread hold the associated inflation lock.  The following code simply restricts
         // the number of spinners to at most one.  We'll have N-2 threads blocked
         // on the inflationlock, 1 thread holding the inflation lock and using
         // a yield/park strategy, and 1 thread in the midst of inflation.
         // A more refined approach would be to change the encoding of INFLATING
         // to allow encapsulation of a native thread pointer.  Threads waiting for
         // inflation to complete would use CAS to push themselves onto a singly linked
         // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
         // and calling park().  When inflation was complete the thread that accomplished inflation
         // would detach the list and set the markword to inflated with a single CAS and
         // then for each thread on the list, set the flag and unpark() the thread.
         // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
         // wakes at most one thread whereas we need to wake the entire list.
         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
         int YieldThenBlock = 0 ;
         assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
         assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
         Thread::muxAcquire (InflationLocks + ix, "InflationLock") ;
         while (obj->mark() == markOopDesc::INFLATING()) {
           // Beware: NakedYield() is advisory and has almost no effect on some platforms
           // so we periodically call Self->_ParkEvent->park(1).
           // We use a mixed spin/yield/block mechanism.
           if ((YieldThenBlock++) >= 16) {
              Thread::current()->_ParkEvent->park(1) ;
           } else {
              os::NakedYield() ;
           }
         }
         Thread::muxRelease (InflationLocks + ix ) ;
         TEVENT (Inflate: INFLATING - yield/park) ;
       }
    } else {
       SpinPause() ;       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
//

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0 ;
  if (hashCode == 0) {
     // This form uses an unguarded global Park-Miller RNG,
     // so it's possible for two threads to race and generate the same RNG
     // output.  On MP systems we'll have lots of RW access to a global, so the
     // mechanism induces lots of coherency traffic.
     value = os::random() ;
  } else
  if (hashCode == 1) {
     // This variation has the property of being stable (idempotent)
     // between STW operations.  This can be useful in some of the 1-0
     // synchronization schemes.
     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3 ;
     value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
  } else
  if (hashCode == 2) {
     value = 1 ;            // for sensitivity testing
  } else
  if (hashCode == 3) {
     value = ++GVars.hcSequence ;
  } else
  if (hashCode == 4) {
     value = cast_from_oop<intptr_t>(obj) ;
  } else {
     // Marsaglia's xor-shift scheme with thread-specific state
     // This is probably the best overall implementation -- we'll
     // likely make this the default in future releases.
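     // Illustrative sketch (added commentary, not VM code): the update below
     // is the classic 128-bit xor-shift with state (x, y, z, w); in isolation
     // it reads
     //   t = x; t ^= t << 11;
     //   x = y; y = z; z = w;
     //   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
     // and has period 2^128 - 1 provided the state is seeded nonzero.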
     unsigned t = Self->_hashStateX ;
     t ^= (t << 11) ;
     Self->_hashStateX = Self->_hashStateY ;
     Self->_hashStateY = Self->_hashStateZ ;
     Self->_hashStateZ = Self->_hashStateW ;
     unsigned v = Self->_hashStateW ;
     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)) ;
     Self->_hashStateW = v ;
     value = v ;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD ;
  assert (value != markOopDesc::no_hash, "invariant") ;
  TEVENT (hashCode: GENERATE) ;
  return value;
}
//
intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects.  However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint.  The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Box and unbox the raw reference just in case we cause a STW safepoint.
      Handle hobj (Self, obj) ;
      // Relaxing assertion for bug 6320749.
      assert (Universe::verify_in_progress() ||
              !SafepointSynchronize::is_at_safepoint(),
              "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj() ;
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (Universe::verify_in_progress() ||
          Self->is_Java_thread() , "invariant") ;
  assert (Universe::verify_in_progress() ||
          ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant") ;

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark (obj);

  // object should remain ineligible for biased locking
  assert (!mark->has_bias_pattern(), "invariant") ;

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor.  We could add more code here
    // for the fast path, but it is not worth the complexity.
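    // Reader's note (added): a failed CAS here means the mark changed
    // underneath us -- another thread installed a hash, stack-locked the
    // object, or began inflating it -- so falling through to the inflate()
    // path below is always the safe recovery.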
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert (temp->is_neutral(), "invariant") ;
    hash = temp->hash();              // by the current thread; check if the displaced
    if (hash) {                       // header contains a hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case.  So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock.  The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert (mark->is_neutral(), "invariant") ;
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert (temp->is_neutral(), "invariant") ;
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code.  If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert (test->is_neutral(), "invariant") ;
      assert (hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode (Thread::current(), obj()) ;
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0 ;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self.  If no
// thread owns the lock, it returns owner_none.  Otherwise, it returns
// owner_other.
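//
// Illustrative caller sketch (hypothetical, not VM code): a JVMTI-style
// query could dispatch on the result like
//   switch (ObjectSynchronizer::query_lock_ownership(self, h_obj)) {
//     case ObjectSynchronizer::owner_self:  /* held by the calling thread */ break;
//     case ObjectSynchronizer::owner_none:  /* unlocked */                  break;
//     case ObjectSynchronizer::owner_other: /* held by some other thread */ break;
//   }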
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert (!SafepointSynchronize::is_at_safepoint(), "invariant") ;
  assert (self->thread_state() != _thread_blocked , "invariant") ;

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark (obj) ;

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated.  Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner ;
    if (owner == NULL) return owner_none ;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none ;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark (obj) ;

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}
// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Get the next block in the block list.
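// Block layout reminder (added commentary): monitors are carved out of
// arrays of _BLOCKSIZE ObjectMonitors; element [0] of each array is the
// reserved block header -- its object() field holds CHAINMARKER and its
// FreeNext holds the next block -- while elements [1 .. _BLOCKSIZE-1] are
// the usable monitors.  See the block-formatting code in omAlloc().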
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext ;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.
//


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop.  In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//

static void InduceScavenge (Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
    // Induce a 'null' safepoint to scavenge monitors.
    // The VM_Operation instance must be heap allocated, as the op will be enqueued and posted
    // to the VMthread and have a lifespan longer than that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute (new VM_ForceAsyncSafepoint()) ;

    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout) ;
    }
  }
}
/* Too slow for general assert or debug
void ObjectSynchronizer::verifyInUse (Thread *Self) {
   ObjectMonitor* mid;
   int inusetally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
     inusetally ++;
   }
   assert(inusetally == Self->omInUseCount, "inuse count off");

   int freetally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
     freetally ++;
   }
   assert(freetally == Self->omFreeCount, "free count off");
}
*/
ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
    // A large MAXPRIVATE value reduces both list lock contention
    // and list coherency traffic, but also tends to increase the
    // number of objectMonitors in circulation as well as the STW
    // scavenge costs.  As usual, we lean toward time in space-time
    // tradeoffs.
    const int MAXPRIVATE = 1024 ;
    for (;;) {
        ObjectMonitor * m ;

        // 1: try to allocate from the thread's local omFreeList.
        // Threads will attempt to allocate first from their local list, then
        // from the global list, and only after those attempts fail will the thread
        // attempt to instantiate new monitors.  Thread-local free lists take
        // heat off the ListLock and improve allocation latency, as well as reducing
        // coherency traffic on the shared global list.
        m = Self->omFreeList ;
        if (m != NULL) {
           Self->omFreeList = m->FreeNext ;
           Self->omFreeCount -- ;
           // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
           guarantee (m->object() == NULL, "invariant") ;
           if (MonitorInUseLists) {
             m->FreeNext = Self->omInUseList;
             Self->omInUseList = m;
             Self->omInUseCount ++;
             // verifyInUse(Self);
           } else {
             m->FreeNext = NULL;
           }
           return m ;
        }

        // 2: try to allocate from the global gFreeList
        // CONSIDER: use muxTry() instead of muxAcquire().
        // If the muxTry() fails then drop immediately into case 3.
        // If we're using thread-local free lists then try
        // to reprovision the caller's free list.
        if (gFreeList != NULL) {
            // Reprovision the thread's omFreeList.
            // Use bulk transfers to reduce the allocation rate and heat
            // on various locks.
            Thread::muxAcquire (&ListLock, "omAlloc") ;
            for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL; ) {
                MonitorFreeCount --;
                ObjectMonitor * take = gFreeList ;
                gFreeList = take->FreeNext ;
                guarantee (take->object() == NULL, "invariant") ;
                guarantee (!take->is_busy(), "invariant") ;
                take->Recycle() ;
                omRelease (Self, take, false) ;
            }
            Thread::muxRelease (&ListLock) ;
            Self->omFreeProvision += 1 + (Self->omFreeProvision/2) ;
            if (Self->omFreeProvision > MAXPRIVATE ) Self->omFreeProvision = MAXPRIVATE ;
            TEVENT (omFirst - reprovision) ;

            const int mx = MonitorBound ;
            if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
                // We can't safely induce a STW safepoint from omAlloc() as our thread
                // state may not be appropriate for such activities and callers may hold
                // naked oops, so instead we defer the action.
                InduceScavenge (Self, "omAlloc") ;
            }
            continue;
        }

        // 3: allocate a block of new ObjectMonitors
        // Both the local and global free lists are empty -- resort to malloc().
        // In the current implementation objectMonitors are TSM - immortal.
        assert (_BLOCKSIZE > 1, "invariant") ;
        ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];

        // NOTE: (almost) no way to recover if allocation failed.
        // We might be able to induce a STW safepoint and scavenge enough
        // objectMonitors to permit progress.
        if (temp == NULL) {
            vm_exit_out_of_memory (sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
                                   "Allocate ObjectMonitors");
        }

        // Format the block.
        // Initialize the linked list; each monitor points to its next,
        // forming the singly linked free list.  The very first monitor
        // will point to the next block, which forms the block list.
        // The trick of using the 1st element in the block as gBlockList
        // linkage should be reconsidered.  A better implementation would
        // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

        for (int i = 1; i < _BLOCKSIZE ; i++) {
           temp[i].FreeNext = &temp[i+1];
        }

        // terminate the last monitor as the end of list
        temp[_BLOCKSIZE - 1].FreeNext = NULL ;

        // Element [0] is reserved for global list linkage
        temp[0].set_object(CHAINMARKER);

        // Consider carving out this thread's current request from the
        // block in hand.  This avoids some lock traffic and redundant
        // list activity.

        // Acquire the ListLock to manipulate BlockList and FreeList.
        // An Oyama-Taura-Yonezawa scheme might be more efficient.
        Thread::muxAcquire (&ListLock, "omAlloc [2]") ;
        MonitorPopulation += _BLOCKSIZE-1;
        MonitorFreeCount += _BLOCKSIZE-1;

        // Add the new block to the list of extant blocks (gBlockList).
        // The very first objectMonitor in a block is reserved and dedicated.
        // It serves as blocklist "next" linkage.
        temp[0].FreeNext = gBlockList;
        gBlockList = temp;

        // Add the new string of objectMonitors to the global free list
        temp[_BLOCKSIZE - 1].FreeNext = gFreeList ;
        gFreeList = temp + 1;
        Thread::muxRelease (&ListLock) ;
        TEVENT (Allocate block of monitors) ;
    }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
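//
// Added note: omRelease() is reached from two call sites -- omAlloc() passes
// fromPerThreadAlloc == false when bulk-transferring monitors off the global
// free list, while inflate() passes true when returning a speculatively
// allocated monitor after a failed CAS, in which case the monitor must also
// be unlinked from the per-thread in-use list below.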
//

void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
    guarantee (m->object() == NULL, "invariant") ;

    // Remove from omInUseList
    if (MonitorInUseLists && fromPerThreadAlloc) {
      ObjectMonitor* curmidinuse = NULL;
      for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; ) {
        if (m == mid) {
          // extract from per-thread in-use-list
          if (mid == Self->omInUseList) {
            Self->omInUseList = mid->FreeNext;
          } else if (curmidinuse != NULL) {
            curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
          }
          Self->omInUseCount --;
          // verifyInUse(Self);
          break;
        } else {
          curmidinuse = mid;
          mid = mid->FreeNext;
        }
      }
    }

    // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
    m->FreeNext = Self->omFreeList ;
    Self->omFreeList = m ;
    Self->omFreeCount ++ ;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.
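//
// Added note: omFlush() below makes a single O(n) pass over each local list
// to find its tail and tally its length, then splices both lists onto the
// global lists in constant time while holding ListLock.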

void ObjectSynchronizer::omFlush (Thread * Self) {
    ObjectMonitor * List = Self->omFreeList ;  // Null-terminated SLL
    Self->omFreeList = NULL ;
    ObjectMonitor * Tail = NULL ;
    int Tally = 0;
    if (List != NULL) {
      ObjectMonitor * s ;
      for (s = List ; s != NULL ; s = s->FreeNext) {
          Tally ++ ;
          Tail = s ;
          guarantee (s->object() == NULL, "invariant") ;
          guarantee (!s->is_busy(), "invariant") ;
          s->set_owner (NULL) ;   // redundant but good hygiene
          TEVENT (omFlush - Move one) ;
      }
      guarantee (Tail != NULL && List != NULL, "invariant") ;
    }

    ObjectMonitor * InUseList = Self->omInUseList;
    ObjectMonitor * InUseTail = NULL ;
    int InUseTally = 0;
    if (InUseList != NULL) {
      Self->omInUseList = NULL;
      ObjectMonitor *curom;
      for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
        InUseTail = curom;
        InUseTally++;
      }
      // TODO debug
      assert(Self->omInUseCount == InUseTally, "inuse count off");
      Self->omInUseCount = 0;
      guarantee (InUseTail != NULL && InUseList != NULL, "invariant");
    }

    Thread::muxAcquire (&ListLock, "omFlush") ;
    if (Tail != NULL) {
      Tail->FreeNext = gFreeList ;
      gFreeList = List ;
      MonitorFreeCount += Tally;
    }

    if (InUseTail != NULL) {
      InUseTail->FreeNext = gOmInUseList;
      gOmInUseList = InUseList;
      gOmInUseCount += InUseTally;
    }

    Thread::muxRelease (&ListLock) ;
    TEVENT (omFlush) ;
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.


ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert (Universe::verify_in_progress() ||
          !SafepointSynchronize::is_at_safepoint(), "invariant") ;

  for (;;) {
      const markOop mark = object->mark() ;
      assert (!mark->has_bias_pattern(), "invariant") ;

      // The mark can be in one of the following states:
      // *  Inflated     - just return
      // *  Stack-locked - coerce it to inflated
      // *  INFLATING    - busy wait for conversion to complete
      // *  Neutral      - aggressively inflate the object.
      // *  BIASED       - Illegal.  We should never see this

      // CASE: inflated
      if (mark->has_monitor()) {
          ObjectMonitor * inf = mark->monitor() ;
          assert (inf->header()->is_neutral(), "invariant");
          assert (inf->object() == object, "invariant") ;
          assert (ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
          return inf ;
      }

      // CASE: inflation in progress - inflating over a stack-lock.
      // Some other thread is converting from stack-locked to inflated.
      // Only that thread can complete inflation -- other threads must wait.
      // The INFLATING value is transient.
      // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
      // We could always eliminate polling by parking the thread on some auxiliary list.
      if (mark == markOopDesc::INFLATING()) {
         TEVENT (Inflate: spin while INFLATING) ;
         ReadStableMark(object) ;
         continue ;
      }

      // CASE: stack-locked
      // Could be stack-locked either by this thread or by some other thread.
      //
      // Note that we allocate the objectmonitor speculatively, _before_ attempting
      // to install INFLATING into the mark word.  We originally installed INFLATING,
      // allocated the objectmonitor, and then finally STed the address of the
      // objectmonitor into the mark.  This was correct, but artificially lengthened
      // the interval in which INFLATED appeared in the mark, thus increasing
      // the odds of inflation contention.
      //
      // We now use per-thread private objectmonitor free lists.
      // These lists are reprovisioned from the global free list outside the
      // critical INFLATING...ST interval.  A thread can transfer
      // multiple objectmonitors en masse from the global free list to its local free list.
      // This reduces coherency traffic and lock contention on the global free list.
      // Using such local free lists, it doesn't matter if the omAlloc() call appears
      // before or after the CAS(INFLATING) operation.
      // See the comments in omAlloc().

      if (mark->has_locker()) {
          ObjectMonitor * m = omAlloc (Self) ;
          // Optimistically prepare the objectmonitor - anticipate successful CAS
          // We do this before the CAS in order to minimize the length of time
          // in which INFLATING appears in the mark.
          m->Recycle();
          m->_Responsible  = NULL ;
          m->OwnerIsThread = 0 ;
          m->_recursions   = 0 ;
          m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;   // Consider: maintain by type/class

          markOop cmp = (markOop) Atomic::cmpxchg_ptr (markOopDesc::INFLATING(), object->mark_addr(), mark) ;
          if (cmp != mark) {
             omRelease (Self, m, true) ;
             continue ;       // Interference -- just retry
          }

          // We've successfully installed INFLATING (0) into the mark-word.
          // This is the only case where 0 will appear in a mark-word.
          // Only the singular thread that successfully swings the mark-word
          // to 0 can perform (or more precisely, complete) inflation.
          //
          // Why do we CAS a 0 into the mark-word instead of just CASing the
          // mark-word from the stack-locked value directly to the new inflated state?
          // Consider what happens when a thread unlocks a stack-locked object.
          // It attempts to use CAS to swing the displaced header value from the
          // on-stack basiclock back into the object header.  Recall also that the
          // header value (hashcode, etc) can reside in (a) the object header, or
          // (b) a displaced header associated with the stack-lock, or (c) a displaced
          // header in an objectMonitor.  The inflate() routine must copy the header
          // value from the basiclock on the owner's stack to the objectMonitor, all
          // the while preserving the hashCode stability invariants.  If the owner
          // decides to release the lock while the value is 0, the unlock will fail
          // and control will eventually pass from slow_exit() to inflate.  The owner
          // will then spin, waiting for the 0 value to disappear.
          // Put another way, the 0 causes the owner to stall if the owner happens to try to
          // drop the lock (restoring the header from the basiclock to the object)
          // while inflation is in-progress.  This protocol avoids races that would
          // otherwise permit hashCode values to change or "flicker" for an object.
          // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
          // 0 serves as a "BUSY" inflate-in-progress indicator.


          // fetch the displaced mark from the owner's stack.
          // The owner can't die or unwind past the lock while our INFLATING
          // object is in the mark.  Furthermore the owner can't complete
          // an unlock on the object, either.
          markOop dmw = mark->displaced_mark_helper() ;
          assert (dmw->is_neutral(), "invariant") ;

          // Setup monitor fields to proper values -- prepare the monitor
          m->set_header(dmw) ;

          // Optimization: if the mark->locker stack address is associated
          // with this thread we could simply set m->_owner = Self and
          // m->OwnerIsThread = 1.  Note that a thread can inflate an object
          // that it has stack-locked -- as might happen in wait() -- directly
          // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
          m->set_owner(mark->locker());
          m->set_object(object);
          // TODO-FIXME: assert BasicLock->dhw != 0.

          // Must preserve store ordering.  The monitor state must
          // be stable at the time of publishing the monitor address.
          guarantee (object->mark() == markOopDesc::INFLATING(), "invariant") ;
          object->release_set_mark(markOopDesc::encode(m));

          // Hopefully the performance counters are allocated on distinct cache lines
          // to avoid false sharing on MP systems ...
          if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
          TEVENT(Inflate: overwrite stacklock) ;
          if (TraceMonitorInflation) {
            if (object->is_instance()) {
              ResourceMark rm;
              tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                (void *) object, (intptr_t) object->mark(),
                object->klass()->external_name());
            }
          }
          return m ;
      }

      // CASE: neutral
      // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
      // If we know we're inflating for entry it's better to inflate by swinging a
      // pre-locked objectMonitor pointer into the object header.   A successful
      // CAS inflates the object *and* confers ownership to the inflating thread.
      // In the current implementation we use a 2-step mechanism where we CAS()
      // to inflate and then CAS() again to try to swing _owner from NULL to Self.
      // An inflateTry() method that we could call from fast_enter() and slow_enter()
      // would be useful.
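      //
      // A hypothetical sketch of that inflateTry() idea (illustrative only,
      // not implemented in the VM): publish a monitor that is pre-owned by
      // Self, so a single successful CAS both inflates and enters:
      //
      //   ObjectMonitor * m = omAlloc(Self);
      //   m->Recycle();
      //   m->set_header(mark);          // mark must be neutral here
      //   m->set_owner(Self);           // pre-locked for the caller
      //   m->set_object(object);
      //   if ((markOop) Atomic::cmpxchg_ptr(markOopDesc::encode(m),
      //                                     object->mark_addr(), mark) == mark) {
      //     return m;                   // inflated *and* entered in one step
      //   }
      //   // interference: scrub m, omRelease() it, and retry the slow path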

      assert (mark->is_neutral(), "invariant");
      ObjectMonitor * m = omAlloc (Self) ;
      // prepare m for installation - set monitor to initial state
      m->Recycle();
      m->set_header(mark);
      m->set_owner(NULL);
      m->set_object(object);
      m->OwnerIsThread = 1 ;
      m->_recursions   = 0 ;
      m->_Responsible  = NULL ;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit ;       // consider: keep metastats by type/class

      if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
          m->set_object (NULL) ;
          m->set_owner  (NULL) ;
          m->OwnerIsThread = 0 ;
          m->Recycle() ;
          omRelease (Self, m, true) ;
          m = NULL ;
          continue ;
          // interference - the markword changed - just retry.
          // The state-transitions are one-way, so there's no chance of
          // live-lock -- "Inflated" is an absorbing state.
      }

      // Hopefully the performance counters are allocated on distinct
      // cache lines to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc() ;
      TEVENT(Inflate: overwrite neutral) ;
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
            (void *) object, (intptr_t) object->mark(),
            object->klass()->external_name());
        }
      }
      return m ;
  }
}

// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.


// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and return non-busy monitors to
// the global free list.
// An alternative could have used a single global in-use list.  The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
//

enum ManifestConstants {
    ClearResponsibleAtSTW   = 0,
    MaximumRecheckInterval  = 1000
} ;

// Deflate a single monitor if not in use
// Return true if deflated, false if in use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee (obj->mark() == markOopDesc::encode(mid), "invariant") ;
  guarantee (mid == obj->mark()->monitor(), "invariant");
  guarantee (mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
     if (ClearResponsibleAtSTW) mid->_Responsible = NULL ;
     deflated = false;
  } else {
     // Deflate the monitor if it is no longer being used
     // It's idle - scavenge and return to the global free list
     // plain old deflation ...
     TEVENT (deflate_idle_monitors - scavenge1) ;
     if (TraceMonitorInflation) {
       if (obj->is_instance()) {
         ResourceMark rm;
         tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
             (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
       }
     }

     // Restore the header back to obj
     obj->release_set_mark(mid->header());
     mid->clear();

     assert (mid->object() == NULL, "invariant") ;

     // Move the deflated monitor to the working free list defined by FreeHeadp,FreeTailp.
     if (*FreeHeadp == NULL) *FreeHeadp = mid;
     if (*FreeTailp != NULL) {
       ObjectMonitor * prevtail = *FreeTailp;
       assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
       prevtail->FreeNext = mid;
     }
     *FreeTailp = mid;
     deflated = true;
  }
  return deflated;
}

// Caller acquires ListLock
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* curmidinuse = NULL;
  int deflatedcount = 0;

  for (mid = *listheadp; mid != NULL; ) {
     oop obj = (oop) mid->object();
     bool deflated = false;
     if (obj != NULL) {
        deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
     }
     if (deflated) {
        // extract from per-thread in-use list
        if (mid == *listheadp) {
           *listheadp = mid->FreeNext;
        } else if (curmidinuse != NULL) {
           curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        next = mid->FreeNext;
        mid->FreeNext = NULL;  // This mid is current tail in the FreeHead list
        mid = next;
        deflatedcount++;
     } else {
        curmidinuse = mid;
        mid = mid->FreeNext;
     }
  }
  return deflatedcount;
}

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0 ;              // currently associated with objects
  int nInCirculation = 0 ;      // extant
  int nScavenged = 0 ;          // reclaimed
  bool deflated = false;

  ObjectMonitor * FreeHead = NULL ;  // Local SLL of scavenged monitors
  ObjectMonitor * FreeTail = NULL ;

  TEVENT (deflate_idle_monitors) ;
  // Prevent omFlush from changing mids in Thread dtor's during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint;
  // see e.g. 6320749.
  Thread::muxAcquire (&ListLock, "scavenge - return") ;

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
      cur->omInUseCount -= deflatedcount;
      // verifyInUse(cur);
      nScavenged += deflatedcount;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
      gOmInUseCount -= deflatedcount;
      nScavenged += deflatedcount;
      nInuse += gOmInUseCount;
    }

  } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE ;
    for (int i = 1 ; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should be on either a thread-specific private
        // free list or the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee (!mid->is_busy(), "invariant") ;
        continue ;
      }
      deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);

      if (deflated) {
        mid->FreeNext = NULL ;
        nScavenged ++ ;
      } else {
        nInuse ++ ;
      }
    }
  }

  MonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    ::printf ("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
        nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
        MonitorPopulation, MonitorFreeCount) ;
    ::fflush(stdout) ;
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (FreeHead != NULL) {
     guarantee (FreeTail != NULL && nScavenged > 0, "invariant") ;
     assert (FreeTail->FreeNext == NULL, "invariant") ;
     // constant-time list splice - prepend scavenged segment to gFreeList
     FreeTail->FreeNext = gFreeList ;
     gFreeList = FreeHead ;
  }
  Thread::muxRelease (&ListLock) ;

  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged) ;
  if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant ->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random() ;
  GVars.stwCycle ++ ;
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
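// Note (our annotation, not original commentary): the "TRAPS;" member below
// works because the TRAPS macro expands to "Thread* THREAD", so the closure
// gains a Thread* field named THREAD that the THREAD and CHECK macros in
// do_monitor() implicitly reference.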
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv ;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&ListLock);
  THREAD->clear_pending_exception();
}

//------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Check if a monitor belongs to the monitor cache.
// The list is grow-only, so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  ObjectMonitor* block = gBlockList;

  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address) monitor;
      address blk = (address) block;
      size_t diff = mon - blk;
      assert((diff % sizeof(ObjectMonitor)) == 0, "check");
      return 1;
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
  return 0;
}

#endif