/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
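// The macros below gather the probe arguments common to all monitor
// probes (Java thread id, monitor address, class name bytes and length)
// once per probe site, and fire the platform HOTSPOT_MONITOR_* probes
// only when DTraceMonitorProbes is enabled.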
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.

// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * Lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  No_Safepoint_Verifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code if
// this function is changed. The implementation is extremely sensitive
// to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
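// fast_exit() relies on the displaced-header convention established by
// slow_enter(): a NULL displaced header in the BasicLock marks a
// recursive stack-lock, so exiting that frame is a no-op.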
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if displaced header is null, the previous enter is recursive enter, no-op
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// already failed in the interpreter/compiler code. Simply using the
// heavyweight monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}
// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock,
// i.e. to give up an outer lock completely and then re-enter.
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
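// Illustrative pairing of the two calls above (a sketch only; the real
// callers are the class loader and GC code mentioned above):
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);
//   ... wait on lock2, get notified, release lock2 ...
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);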
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
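// Typical use of ObjectLocker is scoped (a sketch, not taken from a real
// call site):
//   {
//     ObjectLocker ol(h_obj, THREAD, true);   // fast_enter in the ctor
//     ... operate on h_obj while holding its monitor ...
//   }                                         // fast_exit in the dtor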
// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj()) -> wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable.  This store was
// unnecessary for correctness.  Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore().  In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs.  Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same
    // RNG output.  On MP systems we'll have lots of RW access to a global,
    // so the mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
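    // The recurrence below is a 128-bit xorshift: _hashStateX.._hashStateW
    // hold the per-thread state, updated on every call.  It assumes the
    // state was seeded non-zero at thread construction, since an all-zero
    // state is a fixed point of the recurrence.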
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}
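// FastHashCode() reads or installs the identity hash.  In outline:
//   - neutral header:          read the hash, or CAS a new one into the mark
//   - inflated:                read the hash from the monitor's header copy
//   - stack-locked by caller:  read the hash from the displaced header
//   - otherwise:               inflate, then install the hash in the
//                              monitor's header copy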
intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor.  We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY cases. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}
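// Monitor blocks are arrays of _BLOCKSIZE PaddedEnd<ObjectMonitor>
// elements.  Element [0] of each block is reserved as the chain link
// (its object() field holds CHAINMARKER), so only elements
// [1.._BLOCKSIZE-1] ever hold real monitors.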
// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (PaddedEnd<ObjectMonitor> * block =
       (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
       block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - Induced STW @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout);
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and so has a lifespan longer than
    // that of this activation record.
    // The VMThread will delete the op when completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      ::printf ("Monitor scavenge - STW posted @%s (%d)\n", Whence, ForceMonitorScavenge) ;
      ::fflush(stdout);
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list, while the very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    gBlockList = temp;

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
1105 // 1106 // Key constraint: all ObjectMonitors on a thread's free list and the global 1107 // free list must have their object field set to null. This prevents the 1108 // scavenger -- deflate_idle_monitors -- from reclaiming them. 1109 1110 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m, 1111 bool fromPerThreadAlloc) { 1112 guarantee(m->object() == NULL, "invariant"); 1113 guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor"); 1114 // Remove from omInUseList 1115 if (MonitorInUseLists && fromPerThreadAlloc) { 1116 ObjectMonitor* cur_mid_in_use = NULL; 1117 bool extracted = false; 1118 for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) { 1119 if (m == mid) { 1120 // extract from per-thread in-use list 1121 if (mid == Self->omInUseList) { 1122 Self->omInUseList = mid->FreeNext; 1123 } else if (cur_mid_in_use != NULL) { 1124 cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list 1125 } 1126 extracted = true; 1127 Self->omInUseCount--; 1128 if (ObjectMonitor::Knob_VerifyInUse) { 1129 verifyInUse(Self); 1130 } 1131 break; 1132 } 1133 } 1134 assert(extracted, "Should have extracted from in-use list"); 1135 } 1136 1137 // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new 1138 m->FreeNext = Self->omFreeList; 1139 Self->omFreeList = m; 1140 Self->omFreeCount++; 1141 } 1142 1143 // Return the monitors of a moribund thread's local free list to 1144 // the global free list. Typically a thread calls omFlush() when 1145 // it's dying. We could also consider having the VM thread steal 1146 // monitors from threads that have not run java code over a few 1147 // consecutive STW safepoints. Relatedly, we might decay 1148 // omFreeProvision at STW safepoints. 1149 // 1150 // Also return the monitors of a moribund thread's omInUseList to 1151 // a global gOmInUseList under the global list lock so these 1152 // will continue to be scanned. 1153 // 1154 // We currently call omFlush() from the Thread:: dtor _after the thread 1155 // has been excised from the thread list and is no longer a mutator. 1156 // That means that omFlush() can run concurrently with a safepoint and 1157 // the scavenge operator. Calling omFlush() from JavaThread::exit() might 1158 // be a better choice as we could safely reason that that the JVM is 1159 // not at a safepoint at the time of the call, and thus there could 1160 // be not inopportune interleavings between omFlush() and the scavenge 1161 // operator. 
// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
                                                     oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this
    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
      // might otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
      TEVENT(Inflate: overwrite stacklock);
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                        (void *) object, (intptr_t) object->mark(),
                        object->klass()->external_name());
        }
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
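    // Roughly, the neutral-path transition below is (a sketch, not to scale):
    //   mark word:  [hash | age | unlocked(01)]  --CAS-->  [&monitor | monitor(10)]
    // where markOopDesc::encode(m) produces the tagged monitor pointer.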
    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      // Interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    if (ObjectMonitor::_sync_Inflations != NULL) ObjectMonitor::_sync_Inflations->inc();
    TEVENT(Inflate: overwrite neutral);
    if (TraceMonitorInflation) {
      if (object->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) object, (intptr_t) object->mark(),
                      object->klass()->external_name());
      }
    }
    return m;
  }
}
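// Illustrative usage (hedged; this paraphrases the shape of the slow paths
// earlier in this file rather than quoting them): callers that need the
// heavyweight form inflate and then operate on the returned ObjectMonitor,
// e.g.
//
//   ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
//
// The retry loop above guarantees the returned monitor is the one installed
// in the object's mark at the time of the return.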
// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// only scans the per-thread in-use lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList, acquiring the
// global list lock. deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and move non-busy monitors to the
// global free list.
// An alternative design would have used a single global in-use list. The
// downside would have been the additional cost of acquiring the global list
// lock for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW    = 0,
  MaximumRecheckInterval   = 1000
};

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return to the global free list:
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (TraceMonitorInflation) {
      if (obj->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the monitor to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
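// Note on the traversal above (descriptive only): cur_mid_in_use always trails
// mid by exactly one surviving node, so unlinking never needs a doubly-linked
// list -- a deflated head updates *listHeadp, and any other deflated node is
// spliced out via cur_mid_in_use->FreeNext. The deflated monitors themselves
// accumulate on the freeHeadp/freeTailp working list, which the caller is
// expected to splice onto gFreeList in one step, as deflate_idle_monitors()
// does below.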
void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;          // currently associated with objects
  int nInCirculation = 0;  // extant
  int nScavenged = 0;      // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else for (PaddedEnd<ObjectMonitor> * block =
              (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
              block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE;
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor*)&block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should either be on a thread-specific private
        // free list or on the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee(!mid->is_busy(), "invariant");
        continue;
      }
      deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

      if (deflated) {
        mid->FreeNext = NULL;
        nScavenged++;
      } else {
        nInuse++;
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and the list
  // agree (a sketch of such an audit follows this function).

  if (ObjectMonitor::Knob_Verbose) {
    ::printf("Deflate: InCirc=%d InUse=%d Scavenged=%d ForceMonitorScavenge=%d : pop=%d free=%d\n",
             nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
             gMonitorPopulation, gMonitorFreeCount);
    ::fflush(stdout);
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  if (ObjectMonitor::_sync_Deflations != NULL) ObjectMonitor::_sync_Deflations->inc(nScavenged);
  if (ObjectMonitor::_sync_MonExtant  != NULL) ObjectMonitor::_sync_MonExtant->set_value(nInCirculation);

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}
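// A hedged sketch of the gFreeList audit suggested above -- a hypothetical
// helper, not present in this file. It must run with gListLock held (or at a
// safepoint) so the list is stable while being counted:
//
//   static void auditFreeList() {
//     int n = 0;
//     for (ObjectMonitor * m = gFreeList; m != NULL; m = m->FreeNext) n++;
//     guarantee(n == gMonitorFreeCount, "gMonitorFreeCount and gFreeList disagree");
//   }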
// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates whether
// the thread called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds. (A sketch of this idiom as a guard object
// follows release_monitors_owned_by_thread() below.)

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}
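// A hedged sketch of the safepoint-counter idiom above as an RAII guard --
// hypothetical, not part of this file. It assumes _safepoint_counter is
// accessible here and is odd while a safepoint is in progress:
//
//   class SafepointCounterGuard {
//     int _saved;
//    public:
//     SafepointCounterGuard() : _saved(SafepointSynchronize::_safepoint_counter) {}
//     ~SafepointCounterGuard() {
//       // Fails if a safepoint started (odd count) or completed (count moved)
//       // between construction and destruction of the guard.
//       guarantee(((_saved ^ SafepointSynchronize::_safepoint_counter) |
//                  (_saved & 1)) == 0, "code ran across a safepoint");
//     }
//   };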
//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size, so
    // do some cache-line-specific sanity checks.

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line, which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line, which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line, which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Check if a monitor belongs to the monitor cache.
// The list is grow-only, so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;

  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address) monitor;
      address blk = (address) block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
  return 0;
}

#endif