/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define ATTR __attribute__((noinline))
#else
  #define ATTR
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized, transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t InflationLocks[NINFLATIONLOCKS];

ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
int ObjectSynchronizer::gOmInUseCount = 0;
static volatile intptr_t ListLock = 0;      // protects global monitor free-list cache
static volatile int MonitorFreeCount  = 0;  // # on gFreeList
static volatile int MonitorPopulation = 0;  // # Extant -- in circulation
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter path.  The interpreter and compilers use
// assembly copies of this code, so be sure to update that code as well
// if the following function is changed.  The implementation is extremely
// sensitive to race conditions.  Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
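
// For orientation, a rough sketch (not authoritative) of how a Java
// monitorenter funnels through this file when the inlined fast paths
// emitted by the interpreter and the JITs fail:
//
//   fast_enter(obj, lock, attempt_rebias, THREAD)
//     -> BiasedLocking::revoke_and_rebias()    // may succeed and return
//     -> slow_enter(obj, lock, THREAD)         // CAS-based stack-lock attempt
//          -> inflate(THREAD, obj)->enter()    // heavyweight ObjectMonitor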

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is null, the previous enter was a recursive
  // enter and this exit is a no-op.
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    // Recursive stack-lock: record the recursion by storing a NULL
    // displaced header in the new BasicLock.
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

#if 0
  // The following optimization isn't particularly useful.
  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
    lock->set_displaced_header(NULL);
    return;
  }
#endif

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.  Simply using the heavyweight
// monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock,
// i.e. to give up an outer lock completely and then re-enter.
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// (a sketch of this sequence follows below)
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
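// A minimal sketch of the sequence above, with hypothetical handles
// lock1/lock2 and a hypothetical BasicLock slot 'lck' (illustrative only,
// not an actual call site in the VM):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // 1
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                       // 2
//   ObjectSynchronizer::fast_exit(lock2(), &lck, THREAD);             // 3
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                  // 4
//   ObjectSynchronizer::fast_enter(lock2, &lck, false, THREAD);       // 5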
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { // possible entry from jni enter
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor try enter
bool ObjectSynchronizer::jni_try_enter(Handle obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate_helper(obj());
  return monitor->try_enter(THREAD);
}
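
// For reference, these entry points back the JNI MonitorEnter/MonitorExit
// functions.  A hedged sketch of a native caller ('env' and 'obj' are the
// usual JNI parameters, not names defined in this file):
//
//   if (env->MonitorEnter(obj) == JNI_OK) {    // funnels into jni_enter()
//     ... critical section ...
//     env->MonitorExit(obj);                   // funnels into jni_exit()
//   }
//
// JNI callers have no Java frame and hence no BasicLock slot to hold a
// displaced header, which is one reason these paths must use the inflated
// (heavyweight) monitor.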

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly (Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  // If the object is stack-locked by the calling thread it has never been
  // inflated, so it can have no waiters and notify is a no-op.
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which STs 0 into the global volatile
// OrderAccess::Dummy variable.  This store is unnecessary for correctness.
// Many threads STing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems.  As such, I avoid using OrderAccess::storestore()
// until it's repaired.  In some cases OrderAccess::fence() -- which incurs local
// latency on the executing processor -- is a better choice as it scales on SMP
// systems.  See http://blogs.sun.com/dave/entry/biased_locking_in_hotspot for a
// discussion of coherency costs.  Note that all our current reference platforms
// provide strong ST-ST order, so the issue is moot on IA32, x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a $ line.
  double padPrefix[8];
  volatile int stwRandom;
  volatile int stwCycle;

  // Hot RW variables -- Sequester to avoid false-sharing
  double padSuffix[16];
  volatile int hcSequence;
  double padFinal[8];
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark (oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;    // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;  // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock.
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::NakedYield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflation lock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(InflationLocks + ix, "InflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::NakedYield();
          }
        }
        Thread::muxRelease(InflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();  // SMP-polite spinning
    }
  }
}
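
// Summary of the protocol ReadStableMark() participates in (see also
// inflate() below): a mark word equal to markOopDesc::INFLATING() -- the
// value 0 -- is a transient "BUSY" indicator.  The mark must not be
// interpreted while it is 0; callers spin/yield/park here until the
// inflating thread publishes the ObjectMonitor address with a releasing
// store, at which point the mark is stable again.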

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
//
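// A worked form of the Phi-based scheme listed above (a sketch only -- not
// the variant this file defaults to; see get_next_hash() below).  Dropping
// the low 3 alignment bits and multiplying by 2654435761 diffuses the
// mid-order address bits across the word, and XORing stwRandom decorrelates
// hash values between STW epochs:
//
//   // hypothetical standalone helper:
//   static inline intptr_t phi_hash(uintptr_t addr, int stw_random) {
//     return (intptr_t) (((addr >> 3) * 2654435761U) ^ (uintptr_t) stw_random);
//   }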

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG value.
    // On MP systems we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else
  if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else
  if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else
  if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else
  if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}
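
// The final branch above is Marsaglia's xor-shift generator.  The same
// recurrence as a self-contained sketch, with local state standing in for
// the per-thread _hashStateX.._hashStateW fields (hypothetical seeds, for
// illustration only):
//
//   static unsigned x = 123456789, y = 362436069, z = 521288629, w = 88675123;
//   static unsigned xorshift128() {
//     unsigned t = x; t ^= t << 11;
//     x = y; y = z; z = w;
//     return w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//   }
//
// Thread-private state gives each thread an independent stream and avoids
// the coherency traffic that the hashCode==0 global-RNG variant induces.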

intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects.  However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint.  The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Box and unbox the raw reference just in case we cause a STW safepoint.
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() ||
         Self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark (obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor.  We could add more code here
    // for a fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by the current thread; check if the displaced
    if (hash) {                       // header contains a hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY case.  So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock.  The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code.  If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}
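
// Recap of where an object's header -- and therefore its identity hash --
// can live (see also the unlock discussion in inflate() below):
//   (a) in the object's mark word, while the object is neutral;
//   (b) in the displaced header held in the on-stack BasicLock, while the
//       object is stack-locked;
//   (c) in the displaced header field of the ObjectMonitor, once inflated.
// FastHashCode() above probes each location in turn and inflates only when
// it must publish a newly generated hash atomically.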

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self.  If no
// thread owns the lock, it returns owner_none.  Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated.  Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}
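
// monitors_iterate() above visits every associated ObjectMonitor through a
// MonitorClosure.  A minimal hypothetical closure, patterned on
// ReleaseJavaMonitorsClosure near the end of this file (illustrative only,
// not code this file defines):
//
//   class CountOwnedMonitorsClosure : public MonitorClosure {
//     Thread* _t;
//     int     _count;
//    public:
//     CountOwnedMonitorsClosure(Thread* t) : _t(t), _count(0) {}
//     void do_monitor(ObjectMonitor* mid) {
//       if (mid->owner() == _t) _count++;
//     }
//     int count() const { return _count; }
//   };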

// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by ListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the ObjectMonitor.
//


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop.  In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//

static void InduceScavenge (Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout);
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record.  The VMThread will delete the op
    // when completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout);
    }
  }
}

/* Too slow for general assert or debug
void ObjectSynchronizer::verifyInUse (Thread *Self) {
   ObjectMonitor* mid;
   int inusetally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
     inusetally ++;
   }
   assert(inusetally == Self->omInUseCount, "inuse count off");

   int freetally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
     freetally ++;
   }
   assert(freetally == Self->omFreeCount, "free count off");
}
*/
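
// The xchg in InduceScavenge() above acts as a one-shot latch: whichever
// thread first swings ForceMonitorScavenge from 0 to 1 posts the async
// safepoint; later callers see a nonzero value and do nothing.  The flag is
// reset in deflate_idle_monitors().  The triggering condition lives in
// omAlloc() below:
//
//   const int mx = MonitorBound;
//   if (mx > 0 && (MonitorPopulation - MonitorFreeCount) > mx) {
//     InduceScavenge(Self, "omAlloc");
//   }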

ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the ListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        // verifyInUse(Self);
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&ListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        MonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&ListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (MonitorPopulation-MonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    assert(_BLOCKSIZE > 1, "invariant");
    ObjectMonitor * temp = new ObjectMonitor[_BLOCKSIZE];

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(sizeof (ObjectMonitor[_BLOCKSIZE]), OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list.  The very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
    // (a sketch appears after this function)

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = &temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the ListLock to manipulate BlockList and FreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&ListLock, "omAlloc [2]");
    MonitorPopulation += _BLOCKSIZE-1;
    MonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    gBlockList = temp;

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&ListLock);
    TEVENT(Allocate block of monitors);
  }
}
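
// A sketch of the alternative block layout suggested in the comment inside
// omAlloc() above, which would free element [0] for real use instead of
// reserving it for gBlockList linkage (hypothetical -- not what this file
// implements):
//
//   class ObjectMonitorBlock {
//     ObjectMonitorBlock * _next;               // block-list linkage
//     ObjectMonitor        _body[_BLOCKSIZE];   // every slot usable
//   };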

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//

void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");

  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* curmidinuse = NULL;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL;) {
      if (m == mid) {
        // extract from the per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (curmidinuse != NULL) {
          curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread's in-use list
        }
        Self->omInUseCount--;
        // verifyInUse(Self);
        break;
      } else {
        curmidinuse = mid;
        mid = mid->FreeNext;
      }
    }
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear the old link before setting the new one
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread destructor, after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush (Thread * Self) {
  ObjectMonitor * List = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * Tail = NULL;
  int Tally = 0;
  if (List != NULL) {
    ObjectMonitor * s;
    for (s = List; s != NULL; s = s->FreeNext) {
      Tally++;
      Tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(Tail != NULL && List != NULL, "invariant");
  }

  ObjectMonitor * InUseList = Self->omInUseList;
  ObjectMonitor * InUseTail = NULL;
  int InUseTally = 0;
  if (InUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *curom;
    for (curom = InUseList; curom != NULL; curom = curom->FreeNext) {
      InUseTail = curom;
      InUseTally++;
    }
    // TODO debug
    assert(Self->omInUseCount == InUseTally, "inuse count off");
    Self->omInUseCount = 0;
    guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&ListLock, "omFlush");
  if (Tail != NULL) {
    Tail->FreeNext = gFreeList;
    gFreeList = List;
    MonitorFreeCount += Tally;
  }

  if (InUseTail != NULL) {
    InUseTail->FreeNext = gOmInUseList;
    gOmInUseList = InUseList;
    gOmInUseCount += InUseTally;
  }

  Thread::muxRelease(&ListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.

ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this.

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->OwnerIsThread = 0;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate().
      // The owner will then spin, waiting for the 0 value to disappear.
      // Put another way, the 0 causes the owner to stall if the owner happens
      // to try to drop the lock (restoring the header from the basiclock to
      // the object) while inflation is in-progress.  This protocol avoids
      // races that would otherwise permit hashCode values to change or
      // "flicker" for an object.  Critically, while object->mark is 0
      // mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self and
      // m->OwnerIsThread = 1.  Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering.  The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
      TEVENT(Inflate: overwrite stacklock);
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                        (void *) object, (intptr_t) object->mark(),
                        object->klass()->external_name());
        }
      }
      return m;
    }
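
    // State transitions for the stack-locked case above, summarized:
    //
    //   stack-locked (mark points to the owner's BasicLock)
    //      |   CAS(mark := INFLATING /* 0 */)   -- exactly one thread wins
    //      v
    //   INFLATING (mark == 0; displaced header stable; unlock stalls)
    //      |   copy displaced header into m, set owner and object
    //      v
    //   inflated (mark := encode(m), published via release_set_mark())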

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->OwnerIsThread = 1;
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->OwnerIsThread = 0;
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
    TEVENT(Inflate: overwrite neutral);
    if (TraceMonitorInflation) {
      if (object->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) object, (intptr_t) object->mark(),
                      object->klass()->external_name());
      }
    }
    return m;
  }
}

// Note that we could encounter some performance loss through false-sharing as
// multiple locks occupy the same $ line.  Padding might be appropriate.


// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() likewise acquires the global
// list lock to scan for non-busy monitors and return them to the global
// free list.
// An alternative could have used a single global in-use list.  The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
//
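
// Control flow of a scavenge pass with MonitorInUseLists enabled, summarized
// (without it, the entire gBlockList is walked instead):
//
//   deflate_idle_monitors()                    // at a STW safepoint
//     muxAcquire(ListLock)
//     for each per-thread omInUseList (plus gOmInUseList for dead threads):
//       walk_monitor_list()
//         deflate_monitor(mid, obj, ...)       // if !mid->is_busy():
//           obj->release_set_mark(mid->header())   // restore the header
//           mid->clear()                           // detach the monitor
//     splice the scavenged monitors onto gFreeList
//     muxRelease(ListLock)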

enum ManifestConstants {
  ClearResponsibleAtSTW   = 0,
  MaximumRecheckInterval  = 1000
};

// Deflate a single monitor if it is not in use.
// Return true if deflated, false if in use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (TraceMonitorInflation) {
      if (obj->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the object to the working free list defined by FreeHead,FreeTail.
    if (*FreeHeadp == NULL) *FreeHeadp = mid;
    if (*FreeTailp != NULL) {
      ObjectMonitor * prevtail = *FreeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
      prevtail->FreeNext = mid;
    }
    *FreeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Caller acquires ListLock
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
                                          ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* curmidinuse = NULL;
  int deflatedcount = 0;

  for (mid = *listheadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    bool deflated = false;
    if (obj != NULL) {
      deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
    }
    if (deflated) {
      // extract from the per-thread in-use list
      if (mid == *listheadp) {
        *listheadp = mid->FreeNext;
      } else if (curmidinuse != NULL) {
        curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread's in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the FreeHead list
      mid = next;
      deflatedcount++;
    } else {
      curmidinuse = mid;
      mid = mid->FreeNext;
    }
  }
  return deflatedcount;
}

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;          // currently associated with objects
  int nInCirculation = 0;  // extant
  int nScavenged = 0;      // reclaimed
  bool deflated = false;

  ObjectMonitor * FreeHead = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * FreeTail = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtor's during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749
  Thread::muxAcquire(&ListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
      cur->omInUseCount -= deflatedcount;
      // verifyInUse(cur);
      nScavenged += deflatedcount;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflatedcount = walk_monitor_list((ObjectMonitor **)&gOmInUseList, &FreeHead, &FreeTail);
      gOmInUseCount -= deflatedcount;
      nScavenged += deflatedcount;
      nInuse += gOmInUseCount;
    }

  } else for (ObjectMonitor* block = gBlockList; block != NULL; block = next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE;
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = &block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should be on either a thread-specific private
        // free list or the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee(!mid->is_busy(), "invariant");
        continue;
      }
      deflated = deflate_monitor(mid, obj, &FreeHead, &FreeTail);

      if (deflated) {
        mid->FreeNext = NULL;
        nScavenged++;
      } else {
        nInuse++;
      }
    }
  }

  MonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that MonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
             nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
             MonitorPopulation, MonitorFreeCount);
    ::fflush(stdout);
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (FreeHead != NULL) {
    guarantee(FreeTail != NULL && nScavenged > 0, "invariant");
    assert(FreeTail->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    FreeTail->FreeNext = gFreeList;
    gFreeList = FreeHead;
  }
  Thread::muxRelease(&ListLock);

  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
  if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}
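
// Illustrative aside (not HotSpot code): the scavenged monitors are returned
// to gFreeList above with a constant-time splice -- the local
// [FreeHead, FreeTail] segment is prepended by pointing the segment's tail
// at the old list head. A minimal standalone sketch, with the hypothetical
// names Node and splice_prepend:
//
//   struct Node { Node* next; };
//
//   // Prepend the segment [head, tail] to 'list' in O(1).
//   // Precondition: tail->next == NULL.
//   void splice_prepend(Node** list, Node* head, Node* tail) {
//     tail->next = *list;
//     *list = head;
//   }
//
// Carrying an explicit tail pointer while building the segment (as
// deflate_monitor() does via FreeTailp) is what makes the final splice
// O(1) instead of O(n).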
// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's
// monitors. Gives up on a particular monitor if an exception occurs, but
// continues the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates whether
// the thread ever called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// The xor term fails if the counter advanced (a safepoint intervened), and
// the low-bit term fails if the counter was odd, i.e., a safepoint was
// already in progress when we sampled it.
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&ListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&ListLock);
  THREAD->clear_pending_exception();
}

//------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  ObjectMonitor* block = gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = block + i;
      oop object = (oop) mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
}

// Check whether a monitor belongs to the monitor cache.
// The list is grow-only, so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  ObjectMonitor* block = gBlockList;

  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address) monitor;
      address blk = (address) block;
      size_t diff = mon - blk;
      assert((diff % sizeof(ObjectMonitor)) == 0, "must fall on an ObjectMonitor boundary");
      return 1;
    }
    block = (ObjectMonitor*) block->FreeNext;
  }
  return 0;
}

#endif
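
// Illustrative aside (not HotSpot code): verify_objmon_isinpool() above
// decides block membership with a pointer-range test plus an alignment
// check. A minimal standalone sketch, with the hypothetical names Elem,
// BLOCKSIZE and in_block; element 0 is excluded because, as in the real
// monitor blocks, it serves as the chain-marker header:
//
//   #include <stddef.h>
//   #include <stdint.h>
//
//   template <typename Elem, int BLOCKSIZE>
//   bool in_block(const Elem* block, const Elem* p) {
//     if (p <= &block[0] || p >= &block[BLOCKSIZE]) return false;
//     size_t diff = (uintptr_t) p - (uintptr_t) block;
//     return (diff % sizeof(Elem)) == 0;   // on an element boundary
//   }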