/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
  #define NOINLINE __attribute__((noinline))
#else
  #define NOINLINE
#endif

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
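//
// Purely illustrative sketch of that fusion (hypothetical primitive; nothing
// in this file defines notifyAndExit()):
//
//   bool notifyAndExit(oopDesc* obj, Thread* self, bool all) {
//     quick_notify(obj, self, all);  // transfer waiters while still owning the monitor
//     // ... then release the monitor within the same runtime call ...
//   }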

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * Lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
//  Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is NULL, the previous enter was a recursive
  // enter and this is a no-op.
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD, object)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy
// weight monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// (An illustrative sketch follows reenter() below.)
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());

  monitor->reenter(recursion, THREAD);
}
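
// Illustrative sketch of the five steps listed above (hypothetical caller,
// not code from this file; error handling elided):
//
//   // holding lock1, then lock2:
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD); // 1) drop lock1 entirely
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                      // 2) wait on lock2
//   // 3) notified on lock2; caller unlocks lock2
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                 // 4) restore lock1 + recursions
//   // 5) caller locks lock2 again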
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj())->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}

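// Illustrative use of ObjectLocker from VM code (a sketch, assuming a
// JavaThread* thread and Handle h exist at the call site):
//
//   {
//     ObjectLocker ol(h, thread);          // fast_enter() in the constructor
//     // ... operate on h() under its monitor ...
//   }                                      // fast_exit() in the destructor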

// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, obj());
  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD, obj())->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD, obj())->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending
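
// Worked example for the padding above (a sketch assuming a 64-byte
// DEFAULT_CACHE_LINE_SIZE; the actual value is platform-dependent):
// stwRandom and stwCycle occupy 2 * sizeof(int) = 8 bytes, so pad #1 adds
// 64 - 8 = 56 bytes, pushing hcSequence onto its own line, and pad #2 adds
// 64 - 4 = 60 bytes so that no subsequently defined global shares that line.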

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}
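
// Note on the stripe index above (added commentary, not original code):
// ix = (addr >> 5) & (NINFLATIONLOCKS-1) discards the low 5 address bits and
// keeps the next 8, mapping each object to one of the 256 gInflationLocks
// slots, so concurrent inflations of unrelated objects rarely share a lock.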

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG
    // output. On MP systems we'll have lots of RW access to a global, so
    // the mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}
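
// Side note (added commentary): the final arm above is Marsaglia's xorshift
// generator with shifts (11, 19, 8) over four words of per-thread state,
// which cycles through all 2^128 - 1 non-zero states before repeating.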

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj);
  // Load the displaced header and check it for a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage
      // of the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread, and it has a lifespan longer than
    // that of this activation record. The VMThread will delete the op when
    // completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
  }
}
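
// Illustrative note (added commentary): with -XX:MonitorBound=N the check in
// omAlloc() below fires once the in-use population, computed as
// (gMonitorPopulation - gMonitorFreeCount), exceeds N; InduceScavenge() then
// requests the asynchronous safepoint whose deflation pass refills gFreeList.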

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store_ptr(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}
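
// Layout sketch of one freshly allocated block (derived from the code above):
//
//   temp[0]                -- reserved; object == CHAINMARKER and FreeNext is
//                             the gBlockList "next block" link
//   temp[1.._BLOCKSIZE-2]  -- free monitors, FreeNext -> following element
//   temp[_BLOCKSIZE-1]     -- last free monitor; FreeNext is spliced onto the
//                             old gFreeList head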

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator. Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(), obj);
}


ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
                                                     oop object) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor. The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress. This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
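      //
      // In summary (sketch of the one-way transitions implemented here):
      //   stack-locked --CAS--> INFLATING (0) --releasing ST--> inflated
      //   neutral -----------------CAS------------------------> inflated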

      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }
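
    // Hypothetical shape of the inflateTry() suggested by the TODO-FIXME
    // at the top of this case (a sketch, not an implemented method):
    // pre-own the monitor so a single CAS both inflates and acquires:
    //   m->set_owner(Self);                       // pre-locked for Self
    //   if (Atomic::cmpxchg_ptr(markOopDesc::encode(m),
    //                           object->mark_addr(), mark) == mark) {
    //     return m;                               // inflated *and* owned
    //   }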

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    return m;
  }
}


// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and return non-busy monitors to the
// global free list.
// An alternative could have used a single global in-use list.  The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};
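
// Editorial sketch (hedged): deflation is the inverse of the inflation
// publish step above -- conceptually, for an idle monitor mid inflated
// over obj,
//   obj->release_set_mark(mid->header());   // neutral header back into obj
//   mid->clear();                           // sever the monitor<->object link
// after which mid can be recycled onto a free list. deflate_monitor()
// below does this, plus the free-list threading.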

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the object to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors
// The given list could be a per-thread list or a global list
// Caller acquires gListLock
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
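
// Usage sketch (illustrative; mirrors deflate_idle_monitors() below):
// harvest one in-use list into a local (head, tail) segment, then account
// for the scavenged monitors:
//   ObjectMonitor* freeHeadp = NULL;
//   ObjectMonitor* freeTailp = NULL;
//   int n = deflate_monitor_list(thread->omInUseList_addr(),
//                                &freeHeadp, &freeTailp);
//   thread->omInUseCount -= n;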

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;          // currently associated with objects
  int nInCirculation = 0;  // extant
  int nScavenged = 0;      // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. bug 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block =
      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should either be on a thread-specific private
          // free list or on the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          nScavenged++;
        } else {
          nInuse++;
        }
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
                  "ForceMonitorScavenge=%d : pop=%d free=%d",
                  nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
                  gMonitorPopulation, gMonitorFreeCount);
    tty->flush();
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  OM_PERFDATA_OP(Deflations, inc(nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}
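
// Editorial note (hedged): observe the constant-time publication pattern
// above -- no matter how many monitors were scavenged, returning them to
// the global free list costs just two writes under gListLock:
//   freeTailp->FreeNext = gFreeList;   // hang the old free list off the tail
//   gFreeList = freeHeadp;             // the segment becomes the new head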

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      if (ObjectMonitor::Knob_VerifyMatch != 0) {
        Handle obj((oop) mid->object());
        tty->print("INFO: unexpected locked object:");
        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
        fatal("exiting JavaThread=" INTPTR_FORMAT
              " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
              p2i(THREAD), p2i(mid));
      }
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}
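
// Editorial note on the safepoint-counter idiom sketched before
// release_monitors_owned_by_thread() (hedged; assumes _safepoint_counter
// is incremented at both safepoint begin and end, so it is odd exactly
// while a safepoint is in progress):
//   (tmp ^ SafepointSynchronize::_safepoint_counter)  // != 0 iff the counter moved
//   (tmp & 1)                                         // != 0 iff sampled mid-safepoint
// OR-ing the two yields a single cheap "no safepoint intervened" check.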

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size so
    // do some cache line specific sanity checks

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif
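
// Worked example for verify_objmon_isinpool() (editorial, hedged): with
// element size S = sizeof(PaddedEnd<ObjectMonitor>), a candidate address
// mon is accepted for block blk when
//   (address)&blk[0] < (address)mon < (address)&blk[0] + _BLOCKSIZE * S
// and the alignment assert additionally demands
//   ((address)mon - (address)blk) % S == 0.
// The lower bound is strict because blk[0] is the CHAINMARKER block
// header, which is never handed out as a real monitor.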