/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define NOINLINE __attribute__((noinline))
#else
  #define NOINLINE
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
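
// A hypothetical caller pattern (illustrative only -- the real call sites
// live in the interpreter and compiled-code runtime entry points):
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // transition to _thread_in_vm and take the slow notify path
//   }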

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
  No_Safepoint_Verifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}
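
// Example: with three threads parked in wait() on an inflated monitor that
// the caller owns, quick_notify(obj, self, true /* all */) transfers all
// three waiters to the entry list (tally == 3) without ever leaving
// _thread_in_Java.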

// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * Lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
  No_Safepoint_Verifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compilers use
// assembly copies of this code. Make sure to update those copies
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // if displaced header is null, the previous enter is recursive enter, no-op
  assert(object == oopDesc::bs()->resolve_and_maybe_copy_oop(object), "expect to-space copy");

  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr (dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
}
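
// Example of the recursive case handled in fast_exit() above: a thread
// executing synchronized(o) { synchronized(o) { ... } } stack-locks o once;
// the inner BasicLock's displaced header is set to NULL on enter, so the
// matching inner exit is a no-op.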

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavyweight
// monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
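
// A minimal sketch of the complete_exit()/reenter() pairing described above
// (illustrative only; error handling elided):
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD); // drop lock1 entirely
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                      // wait on the inner lock
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                 // restore recursion count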
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
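
// Typical RAII usage inside the VM (illustrative):
//   {
//     ObjectLocker ol(h_obj, THREAD, true);  // fast_enter() in the constructor
//     ... operate on state guarded by h_obj ...
//   }                                        // fast_exit() in the destructor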

// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  assert(obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}
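
// Note the fast path in notify()/notifyall() above: if the object is
// stack-locked by the calling thread the waitset is necessarily empty,
// because wait() always inflates, so we can return without inflating.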

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}
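
// Summary of the back-off schedule above (illustrative): on an MP system a
// waiter spins with SpinPause() for up to 10000 iterations, then alternates
// naked_yield() with the striped inflation-lock path, which itself yields up
// to 16 times before falling back to brief timed parks (park(1)).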

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
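
// For example, the Phi-based scheme above could be realized as follows
// (an illustrative sketch only -- it is not one of the selectable hashCode
// modes implemented below):
//   static inline intptr_t phi_hash(oop obj) {
//     return (((uintptr_t) cast_from_oop<intptr_t>(obj) >> 3) * 2654435761U)
//            ^ GVars.stwRandom;
//   }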

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same
    // random value.
    // On an MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    //   The displaced header is strictly immutable.
    // It can NOT be changed in ANY cases. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}
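
// Example: calling FastHashCode() on an object that is stack-locked by a
// *different* thread matches none of the three cases above, so control falls
// through to inflate(); only the inflated header can be updated safely with
// the newly assigned hash.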

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  assert(h_obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(h_obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}
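
// Example: while thread A holds synchronized(o), query_lock_ownership(A, o)
// returns owner_self and query_lock_ownership(B, o) returns owner_other;
// after A exits the monitor, both queries return owner_none.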

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  assert(h_obj() == oopDesc::bs()->resolve_and_maybe_copy_oop(h_obj()), "expect to-space copy");
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  for (PaddedEnd<ObjectMonitor> * block =
       (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
       block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}
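
// Illustrative use of monitors_iterate() with a hypothetical MonitorClosure
// subclass (sketch only):
//   class CountingMonitorClosure : public MonitorClosure {
//     int _count;
//    public:
//     CountingMonitorClosure() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) { _count++; }
//     int count() const { return _count; }
//   };
//   CountingMonitorClosure cmc;
//   ObjectSynchronizer::monitors_iterate(&cmc);  // visits every associated monitor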

// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge) ;
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than that
    // of this activation record.  The VMThread will delete the op when completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge) ;
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}
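
// Example of the bound check in omAlloc() below: with -XX:MonitorBound=16384,
// once (gMonitorPopulation - gMonitorFreeCount) exceeds 16384 the allocating
// thread calls InduceScavenge(), which posts a single asynchronous
// VM_ForceAsyncSafepoint rather than safepointing directly.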

ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // initialize the linked list; each monitor points to its next,
    // forming the singly linked free list, and the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    gBlockList = temp;

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}
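
// Example allocation flow (illustrative, assuming omFreeProvision == 32): a
// thread with an empty omFreeList bulk-transfers up to 32 monitors from
// gFreeList under gListLock and retries; omFreeProvision then grows to
// 49 (32 + 1 + 32/2), saturating at MAXPRIVATE (1024).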

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}
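
// Example: inflate() below calls omRelease(Self, m, true) when its CAS on
// the mark word loses a race; the monitor is unlinked from the per-thread
// in-use list (when MonitorInUseLists is set) and pushed back onto the
// per-thread free list.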

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}
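
// Mark word transitions driven by inflate() below (sketch):
//   neutral      --CAS(mark, monitor)-------------------> inflated
//   stack-locked --CAS(mark, INFLATING)--> 0 --release ST--> inflated
// INFLATING (0) is transient and is the only time 0 appears in a mark word.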

ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
                                                     oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(object == oopDesc::bs()->resolve_and_maybe_copy_oop(object), "expect to-space copy");
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert((oop) inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
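
      // Example of the race the protocol above prevents (illustrative):
      // owner T1 tries to unlock while T2 inflates. T1's CAS of the displaced
      // header back into the mark fails because the mark is 0 (INFLATING),
      // so T1 eventually reaches inflate() and spins in ReadStableMark()
      // until T2 publishes the ObjectMonitor*; the hashCode held in the
      // displaced header therefore never "flickers".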

      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up the monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (TraceMonitorInflation) {
        if (object->is_instance()) {
          ResourceMark rm;
          tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                        (void *) object, (intptr_t) object->mark(),
                        object->klass()->external_name());
        }
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful; see the sketch following this function.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (TraceMonitorInflation) {
      if (object->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) object, (intptr_t) object->mark(),
                      object->klass()->external_name());
      }
    }
    return m;
  }
}
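
// The inflateTry() idea from the TODO-FIXME in inflate() above, sketched for
// clarity (hypothetical -- no such method exists in this file; the name and
// shape are illustrative only):
//
//   ObjectMonitor* ObjectSynchronizer::inflateTry(Thread* Self, oop object) {
//     markOop mark = object->mark();
//     if (!mark->is_neutral()) return NULL;      // fall back to inflate()
//     ObjectMonitor* m = omAlloc(Self);
//     m->Recycle();
//     m->set_header(mark);
//     m->set_owner(Self);                        // pre-locked for the caller
//     m->set_object(object);
//     if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(),
//                             mark) == mark) {
//       return m;  // one CAS both inflated the object and conferred ownership
//     }
//     m->set_object(NULL); m->set_owner(NULL); m->Recycle();
//     omRelease(Self, m, true);
//     return NULL;                               // interference -- retry elsewhere
//   }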

// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList, moving non-busy monitors to the
// global free list.
// An alternative could have used a single global in-use list.  The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};
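
// The deflation routines below accumulate scavenged monitors on a local
// singly-linked working list (freeHeadp/freeTailp) and splice it onto
// gFreeList in O(1) at the end.  A minimal sketch of the pattern (generic
// illustration, not a verbatim excerpt):
//
//   ObjectMonitor* freeHead = NULL;
//   ObjectMonitor* freeTail = NULL;
//   // for each scavenged monitor 'mid': append to the working list
//   if (freeHead == NULL) freeHead = mid;
//   if (freeTail != NULL) freeTail->FreeNext = mid;
//   freeTail = mid;
//   mid->FreeNext = NULL;
//   // after the scan, one splice under gListLock:
//   //   freeTail->FreeNext = gFreeList; gFreeList = freeHead;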

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  assert(obj == oopDesc::bs()->resolve_and_maybe_copy_oop(obj), "expect to-space copy");
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (TraceMonitorInflation) {
      if (obj->is_instance()) {
        ResourceMark rm;
        tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                      (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the object to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors
// The given list could be a per-thread list or a global list
// Caller acquires gListLock
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
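
// Typical use of deflate_monitor_list(), as deflate_idle_monitors() does
// below (condensed sketch; assumes gListLock is already held):
//
//   ObjectMonitor* freeHead = NULL;
//   ObjectMonitor* freeTail = NULL;
//   int n = deflate_monitor_list(thread->omInUseList_addr(), &freeHead, &freeTail);
//   thread->omInUseCount -= n;   // the in-use list shrank by n unlinked monitors
//   // ... later, splice freeHead/freeTail onto gFreeList in O(1).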

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;            // currently associated with objects
  int nInCirculation = 0;    // extant
  int nScavenged = 0;        // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtor's during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. bug 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else for (PaddedEnd<ObjectMonitor> * block =
              (PaddedEnd<ObjectMonitor> *)gBlockList; block != NULL;
              block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    nInCirculation += _BLOCKSIZE;
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor*)&block[i];
      oop obj = (oop) mid->object();

      if (obj == NULL) {
        // The monitor is not associated with an object.
        // The monitor should be on either a thread-specific private
        // free list or the global free list.
        // obj == NULL IMPLIES mid->is_busy() == 0
        guarantee(!mid->is_busy(), "invariant");
        continue;
      }
      deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

      if (deflated) {
        mid->FreeNext = NULL;
        nScavenged++;
      } else {
        nInuse++;
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
                  "ForceMonitorScavenge=%d : pop=%d free=%d",
                  nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
                  gMonitorPopulation, gMonitorFreeCount);
    tty->flush();
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  OM_PERFDATA_OP(Deflations, inc(nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}
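
// Informal bookkeeping invariant, for reference (stated here as a comment,
// not asserted by this code): every extant monitor is either free or in use,
// so roughly
//
//   gMonitorPopulation == gMonitorFreeCount               // global free list
//                         + sum of per-thread omFreeCount // local free lists
//                         + monitors currently associated with objects
//
// which is why the Knob_Verbose report above prints pop= and free= together.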

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      if (ObjectMonitor::Knob_VerifyMatch != 0) {
        Handle obj((oop) mid->object());
        tty->print("INFO: unexpected locked object:");
        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
        fatal(err_msg("exiting JavaThread=" INTPTR_FORMAT
                      " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
                      THREAD, mid));
      }
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of No_Safepoint_Verifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  No_Safepoint_Verifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}
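
// The safepoint-counter idiom above could be packaged as a tiny scope guard
// (hypothetical sketch; SafepointCounterGuard is not an existing HotSpot
// class, and _safepoint_counter access is illustrative):
//
//   class SafepointCounterGuard {
//     intptr_t _saved;
//    public:
//     SafepointCounterGuard() : _saved(SafepointSynchronize::_safepoint_counter) {}
//     ~SafepointCounterGuard() {
//       intptr_t now = SafepointSynchronize::_safepoint_counter;
//       // same counter value, and saved value even (not mid-safepoint)
//       // => no safepoint ran inside the guarded scope
//       guarantee(((_saved ^ now) | (_saved & 1)) == 0, "invariant");
//     }
//   };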

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size so
    // do some cache line specific sanity checks

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;
  ObjectMonitor* mid;
  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      mid = (ObjectMonitor *)(block + i);
      oop object = (oop) mid->object();

      if (object != NULL) {
        mid->verify();
      }
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
}

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = (PaddedEnd<ObjectMonitor> *)gBlockList;

  while (block) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address) monitor;
      address blk = (address) block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "check");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *) block->FreeNext;
  }
  return 0;
}

#endif