1 /* 2 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/vmSymbols.hpp" 27 #include "logging/log.hpp" 28 #include "memory/metaspaceShared.hpp" 29 #include "memory/padded.hpp" 30 #include "memory/resourceArea.hpp" 31 #include "oops/markOop.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "runtime/atomic.hpp" 34 #include "runtime/biasedLocking.hpp" 35 #include "runtime/handles.inline.hpp" 36 #include "runtime/interfaceSupport.hpp" 37 #include "runtime/mutexLocker.hpp" 38 #include "runtime/objectMonitor.hpp" 39 #include "runtime/objectMonitor.inline.hpp" 40 #include "runtime/osThread.hpp" 41 #include "runtime/stubRoutines.hpp" 42 #include "runtime/synchronizer.hpp" 43 #include "runtime/thread.inline.hpp" 44 #include "runtime/vframe.hpp" 45 #include "utilities/dtrace.hpp" 46 #include "utilities/events.hpp" 47 #include "utilities/macros.hpp" 48 #include "utilities/preserveException.hpp" 49 #if INCLUDE_TRACE 50 #include "trace/tracing.hpp" 51 #endif 52 53 // The "core" versions of monitor enter and exit reside in this file. 54 // The interpreter and compilers contain specialized transliterated 55 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), 56 // for instance. If you make changes here, make sure to modify the 57 // interpreter, and both C1 and C2 fast-path inline locking code emission. 58 // 59 // ----------------------------------------------------------------------------- 60 61 #ifdef DTRACE_ENABLED 62 63 // Only bother with this argument setup if dtrace is available 64 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. 65 66 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \ 67 char* bytes = NULL; \ 68 int len = 0; \ 69 jlong jtid = SharedRuntime::get_java_tid(thread); \ 70 Symbol* klassname = ((oop)(obj))->klass()->name(); \ 71 if (klassname != NULL) { \ 72 bytes = (char*)klassname->bytes(); \ 73 len = klassname->utf8_length(); \ 74 } 75 76 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \ 77 { \ 78 if (DTraceMonitorProbes) { \ 79 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ 80 HOTSPOT_MONITOR_WAIT(jtid, \ 81 (uintptr_t)(monitor), bytes, len, (millis)); \ 82 } \ 83 } 84 85 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY 86 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL 87 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED 88 89 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \ 90 { \ 91 if (DTraceMonitorProbes) { \ 92 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ 93 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \ 94 (uintptr_t)(monitor), bytes, len); \ 95 } \ 96 } 97 98 #else // ndef DTRACE_ENABLED 99 100 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;} 101 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;} 102 103 #endif // ndef DTRACE_ENABLED 104 105 // This exists only as a workaround of dtrace bug 6254741 106 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) { 107 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr); 108 return 0; 109 } 110 111 #define NINFLATIONLOCKS 256 112 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS]; 113 114 // global list of blocks of monitors 115 // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't 116 // want to expose the PaddedEnd template more than necessary. 117 ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL; 118 // global monitor free list 119 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL; 120 // global monitor in-use list, for moribund threads, 121 // monitors they inflated need to be scanned for deflation 122 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL; 123 // count of entries in gOmInUseList 124 int ObjectSynchronizer::gOmInUseCount = 0; 125 126 static volatile intptr_t gListLock = 0; // protects global monitor lists 127 static volatile int gMonitorFreeCount = 0; // # on gFreeList 128 static volatile int gMonitorPopulation = 0; // # Extant -- in circulation 129 130 #define CHAINMARKER (cast_to_oop<intptr_t>(-1)) 131 132 133 // =====================> Quick functions 134 135 // The quick_* forms are special fast-path variants used to improve 136 // performance. In the simplest case, a "quick_*" implementation could 137 // simply return false, in which case the caller will perform the necessary 138 // state transitions and call the slow-path form. 139 // The fast-path is designed to handle frequently arising cases in an efficient 140 // manner and is just a degenerate "optimistic" variant of the slow-path. 141 // returns true -- to indicate the call was satisfied. 142 // returns false -- to indicate the call needs the services of the slow-path. 143 // A no-loitering ordinance is in effect for code in the quick_* family 144 // operators: safepoints or indefinite blocking (blocking that might span a 145 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon 146 // entry. 147 // 148 // Consider: An interesting optimization is to have the JIT recognize the 149 // following common idiom: 150 // synchronized (someobj) { .... ; notify(); } 151 // That is, we find a notify() or notifyAll() call that immediately precedes 152 // the monitorexit operation. In that case the JIT could fuse the operations 153 // into a single notifyAndExit() runtime primitive. 154 155 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) { 156 assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); 157 assert(self->is_Java_thread(), "invariant"); 158 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant"); 159 NoSafepointVerifier nsv; 160 if (obj == NULL) return false; // slow-path for invalid obj 161 const markOop mark = obj->mark(); 162 163 if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) { 164 // Degenerate notify 165 // stack-locked by caller so by definition the implied waitset is empty. 166 return true; 167 } 168 169 if (mark->has_monitor()) { 170 ObjectMonitor * const mon = mark->monitor(); 171 assert(mon->object() == obj, "invariant"); 172 if (mon->owner() != self) return false; // slow-path for IMS exception 173 174 if (mon->first_waiter() != NULL) { 175 // We have one or more waiters. Since this is an inflated monitor 176 // that we own, we can transfer one or more threads from the waitset 177 // to the entrylist here and now, avoiding the slow-path. 178 if (all) { 179 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self); 180 } else { 181 DTRACE_MONITOR_PROBE(notify, mon, obj, self); 182 } 183 int tally = 0; 184 do { 185 mon->INotify(self); 186 ++tally; 187 } while (mon->first_waiter() != NULL && all); 188 OM_PERFDATA_OP(Notifications, inc(tally)); 189 } 190 return true; 191 } 192 193 // biased locking and any other IMS exception states take the slow-path 194 return false; 195 } 196 197 198 // The LockNode emitted directly at the synchronization site would have 199 // been too big if it were to have included support for the cases of inflated 200 // recursive enter and exit, so they go here instead. 201 // Note that we can't safely call AsyncPrintJavaStack() from within 202 // quick_enter() as our thread state remains _in_Java. 203 204 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self, 205 BasicLock * lock) { 206 assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); 207 assert(Self->is_Java_thread(), "invariant"); 208 assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant"); 209 NoSafepointVerifier nsv; 210 if (obj == NULL) return false; // Need to throw NPE 211 const markOop mark = obj->mark(); 212 213 if (mark->has_monitor()) { 214 ObjectMonitor * const m = mark->monitor(); 215 assert(m->object() == obj, "invariant"); 216 Thread * const owner = (Thread *) m->_owner; 217 218 // Lock contention and Transactional Lock Elision (TLE) diagnostics 219 // and observability 220 // Case: light contention possibly amenable to TLE 221 // Case: TLE inimical operations such as nested/recursive synchronization 222 223 if (owner == Self) { 224 m->_recursions++; 225 return true; 226 } 227 228 // This Java Monitor is inflated so obj's header will never be 229 // displaced to this thread's BasicLock. Make the displaced header 230 // non-NULL so this BasicLock is not seen as recursive nor as 231 // being locked. We do this unconditionally so that this thread's 232 // BasicLock cannot be mis-interpreted by any stack walkers. For 233 // performance reasons, stack walkers generally first check for 234 // Biased Locking in the object's header, the second check is for 235 // stack-locking in the object's header, the third check is for 236 // recursive stack-locking in the displaced header in the BasicLock, 237 // and last are the inflated Java Monitor (ObjectMonitor) checks. 238 lock->set_displaced_header(markOopDesc::unused_mark()); 239 240 if (owner == NULL && 241 Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) { 242 assert(m->_recursions == 0, "invariant"); 243 assert(m->_owner == Self, "invariant"); 244 return true; 245 } 246 } 247 248 // Note that we could inflate in quick_enter. 249 // This is likely a useful optimization 250 // Critically, in quick_enter() we must not: 251 // -- perform bias revocation, or 252 // -- block indefinitely, or 253 // -- reach a safepoint 254 255 return false; // revert to slow-path 256 } 257 258 // ----------------------------------------------------------------------------- 259 // Fast Monitor Enter/Exit 260 // This the fast monitor enter. The interpreter and compiler use 261 // some assembly copies of this code. Make sure update those code 262 // if the following function is changed. The implementation is 263 // extremely sensitive to race condition. Be careful. 264 265 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, 266 bool attempt_rebias, TRAPS) { 267 if (UseBiasedLocking) { 268 if (!SafepointSynchronize::is_at_safepoint()) { 269 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); 270 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) { 271 return; 272 } 273 } else { 274 assert(!attempt_rebias, "can not rebias toward VM thread"); 275 BiasedLocking::revoke_at_safepoint(obj); 276 } 277 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 278 } 279 280 slow_enter(obj, lock, THREAD); 281 } 282 283 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { 284 markOop mark = object->mark(); 285 // We cannot check for Biased Locking if we are racing an inflation. 286 assert(mark == markOopDesc::INFLATING() || 287 !mark->has_bias_pattern(), "should not see bias pattern here"); 288 289 markOop dhw = lock->displaced_header(); 290 if (dhw == NULL) { 291 // If the displaced header is NULL, then this exit matches up with 292 // a recursive enter. No real work to do here except for diagnostics. 293 #ifndef PRODUCT 294 if (mark != markOopDesc::INFLATING()) { 295 // Only do diagnostics if we are not racing an inflation. Simply 296 // exiting a recursive enter of a Java Monitor that is being 297 // inflated is safe; see the has_monitor() comment below. 298 assert(!mark->is_neutral(), "invariant"); 299 assert(!mark->has_locker() || 300 THREAD->is_lock_owned((address)mark->locker()), "invariant"); 301 if (mark->has_monitor()) { 302 // The BasicLock's displaced_header is marked as a recursive 303 // enter and we have an inflated Java Monitor (ObjectMonitor). 304 // This is a special case where the Java Monitor was inflated 305 // after this thread entered the stack-lock recursively. When a 306 // Java Monitor is inflated, we cannot safely walk the Java 307 // Monitor owner's stack and update the BasicLocks because a 308 // Java Monitor can be asynchronously inflated by a thread that 309 // does not own the Java Monitor. 310 ObjectMonitor * m = mark->monitor(); 311 assert(((oop)(m->object()))->mark() == mark, "invariant"); 312 assert(m->is_entered(THREAD), "invariant"); 313 } 314 } 315 #endif 316 return; 317 } 318 319 if (mark == (markOop) lock) { 320 // If the object is stack-locked by the current thread, try to 321 // swing the displaced header from the BasicLock back to the mark. 322 assert(dhw->is_neutral(), "invariant"); 323 if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) { 324 TEVENT(fast_exit: release stack-lock); 325 return; 326 } 327 } 328 329 // We have to take the slow-path of possible inflation and then exit. 330 ObjectSynchronizer::inflate(THREAD, 331 object, 332 inflate_cause_vm_internal)->exit(true, THREAD); 333 } 334 335 // ----------------------------------------------------------------------------- 336 // Interpreter/Compiler Slow Case 337 // This routine is used to handle interpreter/compiler slow case 338 // We don't need to use fast path here, because it must have been 339 // failed in the interpreter/compiler code. 340 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { 341 markOop mark = obj->mark(); 342 assert(!mark->has_bias_pattern(), "should not see bias pattern here"); 343 344 if (mark->is_neutral()) { 345 // Anticipate successful CAS -- the ST of the displaced mark must 346 // be visible <= the ST performed by the CAS. 347 lock->set_displaced_header(mark); 348 if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) { 349 TEVENT(slow_enter: release stacklock); 350 return; 351 } 352 // Fall through to inflate() ... 353 } else if (mark->has_locker() && 354 THREAD->is_lock_owned((address)mark->locker())) { 355 assert(lock != mark->locker(), "must not re-lock the same lock"); 356 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); 357 lock->set_displaced_header(NULL); 358 return; 359 } 360 361 // The object header will never be displaced to this lock, 362 // so it does not matter what the value is, except that it 363 // must be non-zero to avoid looking like a re-entrant lock, 364 // and must not look locked either. 365 lock->set_displaced_header(markOopDesc::unused_mark()); 366 ObjectSynchronizer::inflate(THREAD, 367 obj(), 368 inflate_cause_monitor_enter)->enter(THREAD); 369 } 370 371 // This routine is used to handle interpreter/compiler slow case 372 // We don't need to use fast path here, because it must have 373 // failed in the interpreter/compiler code. Simply use the heavy 374 // weight monitor should be ok, unless someone find otherwise. 375 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) { 376 fast_exit(object, lock, THREAD); 377 } 378 379 // ----------------------------------------------------------------------------- 380 // Class Loader support to workaround deadlocks on the class loader lock objects 381 // Also used by GC 382 // complete_exit()/reenter() are used to wait on a nested lock 383 // i.e. to give up an outer lock completely and then re-enter 384 // Used when holding nested locks - lock acquisition order: lock1 then lock2 385 // 1) complete_exit lock1 - saving recursion count 386 // 2) wait on lock2 387 // 3) when notified on lock2, unlock lock2 388 // 4) reenter lock1 with original recursion count 389 // 5) lock lock2 390 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() 391 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) { 392 TEVENT(complete_exit); 393 if (UseBiasedLocking) { 394 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 395 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 396 } 397 398 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, 399 obj(), 400 inflate_cause_vm_internal); 401 402 return monitor->complete_exit(THREAD); 403 } 404 405 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() 406 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) { 407 TEVENT(reenter); 408 if (UseBiasedLocking) { 409 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 410 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 411 } 412 413 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, 414 obj(), 415 inflate_cause_vm_internal); 416 417 monitor->reenter(recursion, THREAD); 418 } 419 // ----------------------------------------------------------------------------- 420 // JNI locks on java objects 421 // NOTE: must use heavy weight monitor to handle jni monitor enter 422 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { 423 // the current locking is from JNI instead of Java code 424 TEVENT(jni_enter); 425 if (UseBiasedLocking) { 426 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 427 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 428 } 429 THREAD->set_current_pending_monitor_is_from_java(false); 430 ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD); 431 THREAD->set_current_pending_monitor_is_from_java(true); 432 } 433 434 // NOTE: must use heavy weight monitor to handle jni monitor exit 435 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) { 436 TEVENT(jni_exit); 437 if (UseBiasedLocking) { 438 Handle h_obj(THREAD, obj); 439 BiasedLocking::revoke_and_rebias(h_obj, false, THREAD); 440 obj = h_obj(); 441 } 442 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 443 444 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, 445 obj, 446 inflate_cause_jni_exit); 447 // If this thread has locked the object, exit the monitor. Note: can't use 448 // monitor->check(CHECK); must exit even if an exception is pending. 449 if (monitor->check(THREAD)) { 450 monitor->exit(true, THREAD); 451 } 452 } 453 454 // ----------------------------------------------------------------------------- 455 // Internal VM locks on java objects 456 // standard constructor, allows locking failures 457 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) { 458 _dolock = doLock; 459 _thread = thread; 460 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);) 461 _obj = obj; 462 463 if (_dolock) { 464 TEVENT(ObjectLocker); 465 466 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread); 467 } 468 } 469 470 ObjectLocker::~ObjectLocker() { 471 if (_dolock) { 472 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread); 473 } 474 } 475 476 477 // ----------------------------------------------------------------------------- 478 // Wait/Notify/NotifyAll 479 // NOTE: must use heavy weight monitor to handle wait() 480 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { 481 if (UseBiasedLocking) { 482 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 483 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 484 } 485 if (millis < 0) { 486 TEVENT(wait - throw IAX); 487 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 488 } 489 ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD, 490 obj(), 491 inflate_cause_wait); 492 493 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis); 494 monitor->wait(millis, true, THREAD); 495 496 // This dummy call is in place to get around dtrace bug 6254741. Once 497 // that's fixed we can uncomment the following line, remove the call 498 // and change this function back into a "void" func. 499 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD); 500 return dtrace_waited_probe(monitor, obj, THREAD); 501 } 502 503 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) { 504 if (UseBiasedLocking) { 505 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 506 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 507 } 508 if (millis < 0) { 509 TEVENT(wait - throw IAX); 510 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 511 } 512 ObjectSynchronizer::inflate(THREAD, 513 obj(), 514 inflate_cause_wait)->wait(millis, false, THREAD); 515 } 516 517 void ObjectSynchronizer::notify(Handle obj, TRAPS) { 518 if (UseBiasedLocking) { 519 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 520 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 521 } 522 523 markOop mark = obj->mark(); 524 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 525 return; 526 } 527 ObjectSynchronizer::inflate(THREAD, 528 obj(), 529 inflate_cause_notify)->notify(THREAD); 530 } 531 532 // NOTE: see comment of notify() 533 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { 534 if (UseBiasedLocking) { 535 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 536 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 537 } 538 539 markOop mark = obj->mark(); 540 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 541 return; 542 } 543 ObjectSynchronizer::inflate(THREAD, 544 obj(), 545 inflate_cause_notify)->notifyAll(THREAD); 546 } 547 548 // ----------------------------------------------------------------------------- 549 // Hash Code handling 550 // 551 // Performance concern: 552 // OrderAccess::storestore() calls release() which at one time stored 0 553 // into the global volatile OrderAccess::dummy variable. This store was 554 // unnecessary for correctness. Many threads storing into a common location 555 // causes considerable cache migration or "sloshing" on large SMP systems. 556 // As such, I avoided using OrderAccess::storestore(). In some cases 557 // OrderAccess::fence() -- which incurs local latency on the executing 558 // processor -- is a better choice as it scales on SMP systems. 559 // 560 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for 561 // a discussion of coherency costs. Note that all our current reference 562 // platforms provide strong ST-ST order, so the issue is moot on IA32, 563 // x64, and SPARC. 564 // 565 // As a general policy we use "volatile" to control compiler-based reordering 566 // and explicit fences (barriers) to control for architectural reordering 567 // performed by the CPU(s) or platform. 568 569 struct SharedGlobals { 570 char _pad_prefix[DEFAULT_CACHE_LINE_SIZE]; 571 // These are highly shared mostly-read variables. 572 // To avoid false-sharing they need to be the sole occupants of a cache line. 573 volatile int stwRandom; 574 volatile int stwCycle; 575 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2); 576 // Hot RW variable -- Sequester to avoid false-sharing 577 volatile int hcSequence; 578 DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int)); 579 }; 580 581 static SharedGlobals GVars; 582 static int MonitorScavengeThreshold = 1000000; 583 static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending 584 585 static markOop ReadStableMark(oop obj) { 586 markOop mark = obj->mark(); 587 if (!mark->is_being_inflated()) { 588 return mark; // normal fast-path return 589 } 590 591 int its = 0; 592 for (;;) { 593 markOop mark = obj->mark(); 594 if (!mark->is_being_inflated()) { 595 return mark; // normal fast-path return 596 } 597 598 // The object is being inflated by some other thread. 599 // The caller of ReadStableMark() must wait for inflation to complete. 600 // Avoid live-lock 601 // TODO: consider calling SafepointSynchronize::do_call_back() while 602 // spinning to see if there's a safepoint pending. If so, immediately 603 // yielding or blocking would be appropriate. Avoid spinning while 604 // there is a safepoint pending. 605 // TODO: add inflation contention performance counters. 606 // TODO: restrict the aggregate number of spinners. 607 608 ++its; 609 if (its > 10000 || !os::is_MP()) { 610 if (its & 1) { 611 os::naked_yield(); 612 TEVENT(Inflate: INFLATING - yield); 613 } else { 614 // Note that the following code attenuates the livelock problem but is not 615 // a complete remedy. A more complete solution would require that the inflating 616 // thread hold the associated inflation lock. The following code simply restricts 617 // the number of spinners to at most one. We'll have N-2 threads blocked 618 // on the inflationlock, 1 thread holding the inflation lock and using 619 // a yield/park strategy, and 1 thread in the midst of inflation. 620 // A more refined approach would be to change the encoding of INFLATING 621 // to allow encapsulation of a native thread pointer. Threads waiting for 622 // inflation to complete would use CAS to push themselves onto a singly linked 623 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag 624 // and calling park(). When inflation was complete the thread that accomplished inflation 625 // would detach the list and set the markword to inflated with a single CAS and 626 // then for each thread on the list, set the flag and unpark() the thread. 627 // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease 628 // wakes at most one thread whereas we need to wake the entire list. 629 int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1); 630 int YieldThenBlock = 0; 631 assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant"); 632 assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant"); 633 Thread::muxAcquire(gInflationLocks + ix, "gInflationLock"); 634 while (obj->mark() == markOopDesc::INFLATING()) { 635 // Beware: NakedYield() is advisory and has almost no effect on some platforms 636 // so we periodically call Self->_ParkEvent->park(1). 637 // We use a mixed spin/yield/block mechanism. 638 if ((YieldThenBlock++) >= 16) { 639 Thread::current()->_ParkEvent->park(1); 640 } else { 641 os::naked_yield(); 642 } 643 } 644 Thread::muxRelease(gInflationLocks + ix); 645 TEVENT(Inflate: INFLATING - yield/park); 646 } 647 } else { 648 SpinPause(); // SMP-polite spinning 649 } 650 } 651 } 652 653 // hashCode() generation : 654 // 655 // Possibilities: 656 // * MD5Digest of {obj,stwRandom} 657 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function. 658 // * A DES- or AES-style SBox[] mechanism 659 // * One of the Phi-based schemes, such as: 660 // 2654435761 = 2^32 * Phi (golden ratio) 661 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ; 662 // * A variation of Marsaglia's shift-xor RNG scheme. 663 // * (obj ^ stwRandom) is appealing, but can result 664 // in undesirable regularity in the hashCode values of adjacent objects 665 // (objects allocated back-to-back, in particular). This could potentially 666 // result in hashtable collisions and reduced hashtable efficiency. 667 // There are simple ways to "diffuse" the middle address bits over the 668 // generated hashCode values: 669 670 static inline intptr_t get_next_hash(Thread * Self, oop obj) { 671 intptr_t value = 0; 672 if (hashCode == 0) { 673 // This form uses an unguarded global Park-Miller RNG, 674 // so it's possible for two threads to race and generate the same RNG. 675 // On MP system we'll have lots of RW access to a global, so the 676 // mechanism induces lots of coherency traffic. 677 value = os::random(); 678 } else if (hashCode == 1) { 679 // This variation has the property of being stable (idempotent) 680 // between STW operations. This can be useful in some of the 1-0 681 // synchronization schemes. 682 intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3; 683 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom; 684 } else if (hashCode == 2) { 685 value = 1; // for sensitivity testing 686 } else if (hashCode == 3) { 687 value = ++GVars.hcSequence; 688 } else if (hashCode == 4) { 689 value = cast_from_oop<intptr_t>(obj); 690 } else { 691 // Marsaglia's xor-shift scheme with thread-specific state 692 // This is probably the best overall implementation -- we'll 693 // likely make this the default in future releases. 694 unsigned t = Self->_hashStateX; 695 t ^= (t << 11); 696 Self->_hashStateX = Self->_hashStateY; 697 Self->_hashStateY = Self->_hashStateZ; 698 Self->_hashStateZ = Self->_hashStateW; 699 unsigned v = Self->_hashStateW; 700 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)); 701 Self->_hashStateW = v; 702 value = v; 703 } 704 705 value &= markOopDesc::hash_mask; 706 if (value == 0) value = 0xBAD; 707 assert(value != markOopDesc::no_hash, "invariant"); 708 TEVENT(hashCode: GENERATE); 709 return value; 710 } 711 712 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) { 713 if (UseBiasedLocking) { 714 // NOTE: many places throughout the JVM do not expect a safepoint 715 // to be taken here, in particular most operations on perm gen 716 // objects. However, we only ever bias Java instances and all of 717 // the call sites of identity_hash that might revoke biases have 718 // been checked to make sure they can handle a safepoint. The 719 // added check of the bias pattern is to avoid useless calls to 720 // thread-local storage. 721 if (obj->mark()->has_bias_pattern()) { 722 // Handle for oop obj in case of STW safepoint 723 Handle hobj(Self, obj); 724 // Relaxing assertion for bug 6320749. 725 assert(Universe::verify_in_progress() || 726 !SafepointSynchronize::is_at_safepoint(), 727 "biases should not be seen by VM thread here"); 728 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current()); 729 obj = hobj(); 730 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 731 } 732 } 733 734 // hashCode() is a heap mutator ... 735 // Relaxing assertion for bug 6320749. 736 assert(Universe::verify_in_progress() || DumpSharedSpaces || 737 !SafepointSynchronize::is_at_safepoint(), "invariant"); 738 assert(Universe::verify_in_progress() || DumpSharedSpaces || 739 Self->is_Java_thread() , "invariant"); 740 assert(Universe::verify_in_progress() || DumpSharedSpaces || 741 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant"); 742 743 ObjectMonitor* monitor = NULL; 744 markOop temp, test; 745 intptr_t hash; 746 markOop mark = ReadStableMark(obj); 747 748 // object should remain ineligible for biased locking 749 assert(!mark->has_bias_pattern(), "invariant"); 750 751 if (mark->is_neutral()) { 752 hash = mark->hash(); // this is a normal header 753 if (hash) { // if it has hash, just return it 754 return hash; 755 } 756 hash = get_next_hash(Self, obj); // allocate a new hash code 757 temp = mark->copy_set_hash(hash); // merge the hash code into header 758 // use (machine word version) atomic operation to install the hash 759 test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark); 760 if (test == mark) { 761 return hash; 762 } 763 // If atomic operation failed, we must inflate the header 764 // into heavy weight monitor. We could add more code here 765 // for fast path, but it does not worth the complexity. 766 } else if (mark->has_monitor()) { 767 monitor = mark->monitor(); 768 temp = monitor->header(); 769 assert(temp->is_neutral(), "invariant"); 770 hash = temp->hash(); 771 if (hash) { 772 return hash; 773 } 774 // Skip to the following code to reduce code size 775 } else if (Self->is_lock_owned((address)mark->locker())) { 776 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned 777 assert(temp->is_neutral(), "invariant"); 778 hash = temp->hash(); // by current thread, check if the displaced 779 if (hash) { // header contains hash code 780 return hash; 781 } 782 // WARNING: 783 // The displaced header is strictly immutable. 784 // It can NOT be changed in ANY cases. So we have 785 // to inflate the header into heavyweight monitor 786 // even the current thread owns the lock. The reason 787 // is the BasicLock (stack slot) will be asynchronously 788 // read by other threads during the inflate() function. 789 // Any change to stack may not propagate to other threads 790 // correctly. 791 } 792 793 // Inflate the monitor to set hash code 794 monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code); 795 // Load displaced header and check it has hash code 796 mark = monitor->header(); 797 assert(mark->is_neutral(), "invariant"); 798 hash = mark->hash(); 799 if (hash == 0) { 800 hash = get_next_hash(Self, obj); 801 temp = mark->copy_set_hash(hash); // merge hash code into header 802 assert(temp->is_neutral(), "invariant"); 803 test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark); 804 if (test != mark) { 805 // The only update to the header in the monitor (outside GC) 806 // is install the hash code. If someone add new usage of 807 // displaced header, please update this code 808 hash = test->hash(); 809 assert(test->is_neutral(), "invariant"); 810 assert(hash != 0, "Trivial unexpected object/monitor header usage."); 811 } 812 } 813 // We finally get the hash 814 return hash; 815 } 816 817 // Deprecated -- use FastHashCode() instead. 818 819 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { 820 return FastHashCode(Thread::current(), obj()); 821 } 822 823 824 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, 825 Handle h_obj) { 826 if (UseBiasedLocking) { 827 BiasedLocking::revoke_and_rebias(h_obj, false, thread); 828 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 829 } 830 831 assert(thread == JavaThread::current(), "Can only be called on current thread"); 832 oop obj = h_obj(); 833 834 markOop mark = ReadStableMark(obj); 835 836 // Uncontended case, header points to stack 837 if (mark->has_locker()) { 838 return thread->is_lock_owned((address)mark->locker()); 839 } 840 // Contended case, header points to ObjectMonitor (tagged pointer) 841 if (mark->has_monitor()) { 842 ObjectMonitor* monitor = mark->monitor(); 843 return monitor->is_entered(thread) != 0; 844 } 845 // Unlocked case, header in place 846 assert(mark->is_neutral(), "sanity check"); 847 return false; 848 } 849 850 // Be aware of this method could revoke bias of the lock object. 851 // This method queries the ownership of the lock handle specified by 'h_obj'. 852 // If the current thread owns the lock, it returns owner_self. If no 853 // thread owns the lock, it returns owner_none. Otherwise, it will return 854 // owner_other. 855 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership 856 (JavaThread *self, Handle h_obj) { 857 // The caller must beware this method can revoke bias, and 858 // revocation can result in a safepoint. 859 assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); 860 assert(self->thread_state() != _thread_blocked, "invariant"); 861 862 // Possible mark states: neutral, biased, stack-locked, inflated 863 864 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) { 865 // CASE: biased 866 BiasedLocking::revoke_and_rebias(h_obj, false, self); 867 assert(!h_obj->mark()->has_bias_pattern(), 868 "biases should be revoked by now"); 869 } 870 871 assert(self == JavaThread::current(), "Can only be called on current thread"); 872 oop obj = h_obj(); 873 markOop mark = ReadStableMark(obj); 874 875 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. 876 if (mark->has_locker()) { 877 return self->is_lock_owned((address)mark->locker()) ? 878 owner_self : owner_other; 879 } 880 881 // CASE: inflated. Mark (tagged pointer) points to an objectMonitor. 882 // The Object:ObjectMonitor relationship is stable as long as we're 883 // not at a safepoint. 884 if (mark->has_monitor()) { 885 void * owner = mark->monitor()->_owner; 886 if (owner == NULL) return owner_none; 887 return (owner == self || 888 self->is_lock_owned((address)owner)) ? owner_self : owner_other; 889 } 890 891 // CASE: neutral 892 assert(mark->is_neutral(), "sanity check"); 893 return owner_none; // it's unlocked 894 } 895 896 // FIXME: jvmti should call this 897 JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) { 898 if (UseBiasedLocking) { 899 if (SafepointSynchronize::is_at_safepoint()) { 900 BiasedLocking::revoke_at_safepoint(h_obj); 901 } else { 902 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current()); 903 } 904 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 905 } 906 907 oop obj = h_obj(); 908 address owner = NULL; 909 910 markOop mark = ReadStableMark(obj); 911 912 // Uncontended case, header points to stack 913 if (mark->has_locker()) { 914 owner = (address) mark->locker(); 915 } 916 917 // Contended case, header points to ObjectMonitor (tagged pointer) 918 if (mark->has_monitor()) { 919 ObjectMonitor* monitor = mark->monitor(); 920 assert(monitor != NULL, "monitor should be non-null"); 921 owner = (address) monitor->owner(); 922 } 923 924 if (owner != NULL) { 925 // owning_thread_from_monitor_owner() may also return NULL here 926 return Threads::owning_thread_from_monitor_owner(owner, doLock); 927 } 928 929 // Unlocked case, header in place 930 // Cannot have assertion since this object may have been 931 // locked by another thread when reaching here. 932 // assert(mark->is_neutral(), "sanity check"); 933 934 return NULL; 935 } 936 937 // Visitors ... 938 939 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { 940 PaddedEnd<ObjectMonitor> * block = 941 (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList); 942 while (block != NULL) { 943 assert(block->object() == CHAINMARKER, "must be a block header"); 944 for (int i = _BLOCKSIZE - 1; i > 0; i--) { 945 ObjectMonitor* mid = (ObjectMonitor *)(block + i); 946 oop object = (oop)mid->object(); 947 if (object != NULL) { 948 closure->do_monitor(mid); 949 } 950 } 951 block = (PaddedEnd<ObjectMonitor> *)block->FreeNext; 952 } 953 } 954 955 // Get the next block in the block list. 956 static inline ObjectMonitor* next(ObjectMonitor* block) { 957 assert(block->object() == CHAINMARKER, "must be a block header"); 958 block = block->FreeNext; 959 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); 960 return block; 961 } 962 963 static bool monitors_used_above_threshold() { 964 if (gMonitorPopulation == 0) { 965 return false; 966 } 967 int monitors_used = gMonitorPopulation - gMonitorFreeCount; 968 int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation; 969 return monitor_usage > MonitorUsedDeflationThreshold; 970 } 971 972 bool ObjectSynchronizer::is_cleanup_needed() { 973 if (MonitorUsedDeflationThreshold > 0) { 974 return monitors_used_above_threshold(); 975 } 976 return false; 977 } 978 979 void ObjectSynchronizer::oops_do(OopClosure* f) { 980 if (MonitorInUseLists) { 981 // When using thread local monitor lists, we only scan the 982 // global used list here (for moribund threads), and 983 // the thread-local monitors in Thread::oops_do(). 984 global_used_oops_do(f); 985 } else { 986 global_oops_do(f); 987 } 988 } 989 990 void ObjectSynchronizer::global_oops_do(OopClosure* f) { 991 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 992 PaddedEnd<ObjectMonitor> * block = 993 (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList); 994 for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) { 995 assert(block->object() == CHAINMARKER, "must be a block header"); 996 for (int i = 1; i < _BLOCKSIZE; i++) { 997 ObjectMonitor* mid = (ObjectMonitor *)&block[i]; 998 if (mid->object() != NULL) { 999 f->do_oop((oop*)mid->object_addr()); 1000 } 1001 } 1002 } 1003 } 1004 1005 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) { 1006 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1007 list_oops_do(gOmInUseList, f); 1008 } 1009 1010 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) { 1011 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1012 list_oops_do(thread->omInUseList, f); 1013 } 1014 1015 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) { 1016 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1017 ObjectMonitor* mid; 1018 for (mid = list; mid != NULL; mid = mid->FreeNext) { 1019 if (mid->object() != NULL) { 1020 f->do_oop((oop*)mid->object_addr()); 1021 } 1022 } 1023 } 1024 1025 1026 // ----------------------------------------------------------------------------- 1027 // ObjectMonitor Lifecycle 1028 // ----------------------- 1029 // Inflation unlinks monitors from the global gFreeList and 1030 // associates them with objects. Deflation -- which occurs at 1031 // STW-time -- disassociates idle monitors from objects. Such 1032 // scavenged monitors are returned to the gFreeList. 1033 // 1034 // The global list is protected by gListLock. All the critical sections 1035 // are short and operate in constant-time. 1036 // 1037 // ObjectMonitors reside in type-stable memory (TSM) and are immortal. 1038 // 1039 // Lifecycle: 1040 // -- unassigned and on the global free list 1041 // -- unassigned and on a thread's private omFreeList 1042 // -- assigned to an object. The object is inflated and the mark refers 1043 // to the objectmonitor. 1044 1045 1046 // Constraining monitor pool growth via MonitorBound ... 1047 // 1048 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but the 1049 // the rate of scavenging is driven primarily by GC. As such, we can find 1050 // an inordinate number of monitors in circulation. 1051 // To avoid that scenario we can artificially induce a STW safepoint 1052 // if the pool appears to be growing past some reasonable bound. 1053 // Generally we favor time in space-time tradeoffs, but as there's no 1054 // natural back-pressure on the # of extant monitors we need to impose some 1055 // type of limit. Beware that if MonitorBound is set to too low a value 1056 // we could just loop. In addition, if MonitorBound is set to a low value 1057 // we'll incur more safepoints, which are harmful to performance. 1058 // See also: GuaranteedSafepointInterval 1059 // 1060 // The current implementation uses asynchronous VM operations. 1061 1062 static void InduceScavenge(Thread * Self, const char * Whence) { 1063 // Induce STW safepoint to trim monitors 1064 // Ultimately, this results in a call to deflate_idle_monitors() in the near future. 1065 // More precisely, trigger an asynchronous STW safepoint as the number 1066 // of active monitors passes the specified threshold. 1067 // TODO: assert thread state is reasonable 1068 1069 if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) { 1070 if (ObjectMonitor::Knob_Verbose) { 1071 tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)", 1072 Whence, ForceMonitorScavenge) ; 1073 tty->flush(); 1074 } 1075 // Induce a 'null' safepoint to scavenge monitors 1076 // Must VM_Operation instance be heap allocated as the op will be enqueue and posted 1077 // to the VMthread and have a lifespan longer than that of this activation record. 1078 // The VMThread will delete the op when completed. 1079 VMThread::execute(new VM_ScavengeMonitors()); 1080 1081 if (ObjectMonitor::Knob_Verbose) { 1082 tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)", 1083 Whence, ForceMonitorScavenge) ; 1084 tty->flush(); 1085 } 1086 } 1087 } 1088 1089 void ObjectSynchronizer::verifyInUse(Thread *Self) { 1090 ObjectMonitor* mid; 1091 int in_use_tally = 0; 1092 for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) { 1093 in_use_tally++; 1094 } 1095 assert(in_use_tally == Self->omInUseCount, "in-use count off"); 1096 1097 int free_tally = 0; 1098 for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) { 1099 free_tally++; 1100 } 1101 assert(free_tally == Self->omFreeCount, "free count off"); 1102 } 1103 1104 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) { 1105 // A large MAXPRIVATE value reduces both list lock contention 1106 // and list coherency traffic, but also tends to increase the 1107 // number of objectMonitors in circulation as well as the STW 1108 // scavenge costs. As usual, we lean toward time in space-time 1109 // tradeoffs. 1110 const int MAXPRIVATE = 1024; 1111 for (;;) { 1112 ObjectMonitor * m; 1113 1114 // 1: try to allocate from the thread's local omFreeList. 1115 // Threads will attempt to allocate first from their local list, then 1116 // from the global list, and only after those attempts fail will the thread 1117 // attempt to instantiate new monitors. Thread-local free lists take 1118 // heat off the gListLock and improve allocation latency, as well as reducing 1119 // coherency traffic on the shared global list. 1120 m = Self->omFreeList; 1121 if (m != NULL) { 1122 Self->omFreeList = m->FreeNext; 1123 Self->omFreeCount--; 1124 // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene 1125 guarantee(m->object() == NULL, "invariant"); 1126 if (MonitorInUseLists) { 1127 m->FreeNext = Self->omInUseList; 1128 Self->omInUseList = m; 1129 Self->omInUseCount++; 1130 if (ObjectMonitor::Knob_VerifyInUse) { 1131 verifyInUse(Self); 1132 } 1133 } else { 1134 m->FreeNext = NULL; 1135 } 1136 return m; 1137 } 1138 1139 // 2: try to allocate from the global gFreeList 1140 // CONSIDER: use muxTry() instead of muxAcquire(). 1141 // If the muxTry() fails then drop immediately into case 3. 1142 // If we're using thread-local free lists then try 1143 // to reprovision the caller's free list. 1144 if (gFreeList != NULL) { 1145 // Reprovision the thread's omFreeList. 1146 // Use bulk transfers to reduce the allocation rate and heat 1147 // on various locks. 1148 Thread::muxAcquire(&gListLock, "omAlloc"); 1149 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) { 1150 gMonitorFreeCount--; 1151 ObjectMonitor * take = gFreeList; 1152 gFreeList = take->FreeNext; 1153 guarantee(take->object() == NULL, "invariant"); 1154 guarantee(!take->is_busy(), "invariant"); 1155 take->Recycle(); 1156 omRelease(Self, take, false); 1157 } 1158 Thread::muxRelease(&gListLock); 1159 Self->omFreeProvision += 1 + (Self->omFreeProvision/2); 1160 if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE; 1161 TEVENT(omFirst - reprovision); 1162 1163 const int mx = MonitorBound; 1164 if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) { 1165 // We can't safely induce a STW safepoint from omAlloc() as our thread 1166 // state may not be appropriate for such activities and callers may hold 1167 // naked oops, so instead we defer the action. 1168 InduceScavenge(Self, "omAlloc"); 1169 } 1170 continue; 1171 } 1172 1173 // 3: allocate a block of new ObjectMonitors 1174 // Both the local and global free lists are empty -- resort to malloc(). 1175 // In the current implementation objectMonitors are TSM - immortal. 1176 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE], but we want 1177 // each ObjectMonitor to start at the beginning of a cache line, 1178 // so we use align_size_up(). 1179 // A better solution would be to use C++ placement-new. 1180 // BEWARE: As it stands currently, we don't run the ctors! 1181 assert(_BLOCKSIZE > 1, "invariant"); 1182 size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE; 1183 PaddedEnd<ObjectMonitor> * temp; 1184 size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1); 1185 void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size, 1186 mtInternal); 1187 temp = (PaddedEnd<ObjectMonitor> *) 1188 align_size_up((intptr_t)real_malloc_addr, 1189 DEFAULT_CACHE_LINE_SIZE); 1190 1191 // NOTE: (almost) no way to recover if allocation failed. 1192 // We might be able to induce a STW safepoint and scavenge enough 1193 // objectMonitors to permit progress. 1194 if (temp == NULL) { 1195 vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR, 1196 "Allocate ObjectMonitors"); 1197 } 1198 (void)memset((void *) temp, 0, neededsize); 1199 1200 // Format the block. 1201 // initialize the linked list, each monitor points to its next 1202 // forming the single linked free list, the very first monitor 1203 // will points to next block, which forms the block list. 1204 // The trick of using the 1st element in the block as gBlockList 1205 // linkage should be reconsidered. A better implementation would 1206 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; } 1207 1208 for (int i = 1; i < _BLOCKSIZE; i++) { 1209 temp[i].FreeNext = (ObjectMonitor *)&temp[i+1]; 1210 } 1211 1212 // terminate the last monitor as the end of list 1213 temp[_BLOCKSIZE - 1].FreeNext = NULL; 1214 1215 // Element [0] is reserved for global list linkage 1216 temp[0].set_object(CHAINMARKER); 1217 1218 // Consider carving out this thread's current request from the 1219 // block in hand. This avoids some lock traffic and redundant 1220 // list activity. 1221 1222 // Acquire the gListLock to manipulate gBlockList and gFreeList. 1223 // An Oyama-Taura-Yonezawa scheme might be more efficient. 1224 Thread::muxAcquire(&gListLock, "omAlloc [2]"); 1225 gMonitorPopulation += _BLOCKSIZE-1; 1226 gMonitorFreeCount += _BLOCKSIZE-1; 1227 1228 // Add the new block to the list of extant blocks (gBlockList). 1229 // The very first objectMonitor in a block is reserved and dedicated. 1230 // It serves as blocklist "next" linkage. 1231 temp[0].FreeNext = gBlockList; 1232 // There are lock-free uses of gBlockList so make sure that 1233 // the previous stores happen before we update gBlockList. 1234 OrderAccess::release_store_ptr(&gBlockList, temp); 1235 1236 // Add the new string of objectMonitors to the global free list 1237 temp[_BLOCKSIZE - 1].FreeNext = gFreeList; 1238 gFreeList = temp + 1; 1239 Thread::muxRelease(&gListLock); 1240 TEVENT(Allocate block of monitors); 1241 } 1242 } 1243 1244 // Place "m" on the caller's private per-thread omFreeList. 1245 // In practice there's no need to clamp or limit the number of 1246 // monitors on a thread's omFreeList as the only time we'll call 1247 // omRelease is to return a monitor to the free list after a CAS 1248 // attempt failed. This doesn't allow unbounded #s of monitors to 1249 // accumulate on a thread's free list. 1250 // 1251 // Key constraint: all ObjectMonitors on a thread's free list and the global 1252 // free list must have their object field set to null. This prevents the 1253 // scavenger -- deflate_idle_monitors -- from reclaiming them. 1254 1255 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m, 1256 bool fromPerThreadAlloc) { 1257 guarantee(m->object() == NULL, "invariant"); 1258 guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor"); 1259 // Remove from omInUseList 1260 if (MonitorInUseLists && fromPerThreadAlloc) { 1261 ObjectMonitor* cur_mid_in_use = NULL; 1262 bool extracted = false; 1263 for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) { 1264 if (m == mid) { 1265 // extract from per-thread in-use list 1266 if (mid == Self->omInUseList) { 1267 Self->omInUseList = mid->FreeNext; 1268 } else if (cur_mid_in_use != NULL) { 1269 cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list 1270 } 1271 extracted = true; 1272 Self->omInUseCount--; 1273 if (ObjectMonitor::Knob_VerifyInUse) { 1274 verifyInUse(Self); 1275 } 1276 break; 1277 } 1278 } 1279 assert(extracted, "Should have extracted from in-use list"); 1280 } 1281 1282 // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new 1283 m->FreeNext = Self->omFreeList; 1284 Self->omFreeList = m; 1285 Self->omFreeCount++; 1286 } 1287 1288 // Return the monitors of a moribund thread's local free list to 1289 // the global free list. Typically a thread calls omFlush() when 1290 // it's dying. We could also consider having the VM thread steal 1291 // monitors from threads that have not run java code over a few 1292 // consecutive STW safepoints. Relatedly, we might decay 1293 // omFreeProvision at STW safepoints. 1294 // 1295 // Also return the monitors of a moribund thread's omInUseList to 1296 // a global gOmInUseList under the global list lock so these 1297 // will continue to be scanned. 1298 // 1299 // We currently call omFlush() from Threads::remove() _before the thread 1300 // has been excised from the thread list and is no longer a mutator. 1301 // This means that omFlush() can not run concurrently with a safepoint and 1302 // interleave with the scavenge operator. In particular, this ensures that 1303 // the thread's monitors are scanned by a GC safepoint, either via 1304 // Thread::oops_do() (if safepoint happens before omFlush()) or via 1305 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's 1306 // monitors have been transferred to the global in-use list). 1307 1308 void ObjectSynchronizer::omFlush(Thread * Self) { 1309 ObjectMonitor * list = Self->omFreeList; // Null-terminated SLL 1310 Self->omFreeList = NULL; 1311 ObjectMonitor * tail = NULL; 1312 int tally = 0; 1313 if (list != NULL) { 1314 ObjectMonitor * s; 1315 // The thread is going away, the per-thread free monitors 1316 // are freed via set_owner(NULL) 1317 // Link them to tail, which will be linked into the global free list 1318 // gFreeList below, under the gListLock 1319 for (s = list; s != NULL; s = s->FreeNext) { 1320 tally++; 1321 tail = s; 1322 guarantee(s->object() == NULL, "invariant"); 1323 guarantee(!s->is_busy(), "invariant"); 1324 s->set_owner(NULL); // redundant but good hygiene 1325 TEVENT(omFlush - Move one); 1326 } 1327 guarantee(tail != NULL && list != NULL, "invariant"); 1328 } 1329 1330 ObjectMonitor * inUseList = Self->omInUseList; 1331 ObjectMonitor * inUseTail = NULL; 1332 int inUseTally = 0; 1333 if (inUseList != NULL) { 1334 Self->omInUseList = NULL; 1335 ObjectMonitor *cur_om; 1336 // The thread is going away, however the omInUseList inflated 1337 // monitors may still be in-use by other threads. 1338 // Link them to inUseTail, which will be linked into the global in-use list 1339 // gOmInUseList below, under the gListLock 1340 for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) { 1341 inUseTail = cur_om; 1342 inUseTally++; 1343 } 1344 assert(Self->omInUseCount == inUseTally, "in-use count off"); 1345 Self->omInUseCount = 0; 1346 guarantee(inUseTail != NULL && inUseList != NULL, "invariant"); 1347 } 1348 1349 Thread::muxAcquire(&gListLock, "omFlush"); 1350 if (tail != NULL) { 1351 tail->FreeNext = gFreeList; 1352 gFreeList = list; 1353 gMonitorFreeCount += tally; 1354 assert(Self->omFreeCount == tally, "free-count off"); 1355 Self->omFreeCount = 0; 1356 } 1357 1358 if (inUseTail != NULL) { 1359 inUseTail->FreeNext = gOmInUseList; 1360 gOmInUseList = inUseList; 1361 gOmInUseCount += inUseTally; 1362 } 1363 1364 Thread::muxRelease(&gListLock); 1365 TEVENT(omFlush); 1366 } 1367 1368 // Fast path code shared by multiple functions 1369 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) { 1370 markOop mark = obj->mark(); 1371 if (mark->has_monitor()) { 1372 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid"); 1373 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header"); 1374 return mark->monitor(); 1375 } 1376 return ObjectSynchronizer::inflate(Thread::current(), 1377 obj, 1378 inflate_cause_vm_internal); 1379 } 1380 1381 #if INCLUDE_TRACE 1382 static void post_monitor_inflate_event(EventJavaMonitorInflate* event, 1383 const oop obj, 1384 ObjectSynchronizer::InflateCause cause) { 1385 assert(event != NULL, "invariant"); 1386 if (event->should_commit()) { 1387 event->set_monitorClass(obj->klass()); 1388 event->set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj); 1389 event->set_cause((u1)cause); 1390 event->commit(); 1391 } 1392 } 1393 #endif 1394 1395 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self, 1396 oop object, 1397 const InflateCause cause) { 1398 1399 // Inflate mutates the heap ... 1400 // Relaxing assertion for bug 6320749. 1401 assert(Universe::verify_in_progress() || 1402 !SafepointSynchronize::is_at_safepoint(), "invariant"); 1403 1404 TRACE_ONLY(EventJavaMonitorInflate event;) 1405 1406 for (;;) { 1407 const markOop mark = object->mark(); 1408 assert(!mark->has_bias_pattern(), "invariant"); 1409 1410 // The mark can be in one of the following states: 1411 // * Inflated - just return 1412 // * Stack-locked - coerce it to inflated 1413 // * INFLATING - busy wait for conversion to complete 1414 // * Neutral - aggressively inflate the object. 1415 // * BIASED - Illegal. We should never see this 1416 1417 // CASE: inflated 1418 if (mark->has_monitor()) { 1419 ObjectMonitor * inf = mark->monitor(); 1420 assert(inf->header()->is_neutral(), "invariant"); 1421 assert(inf->object() == object, "invariant"); 1422 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); 1423 return inf; 1424 } 1425 1426 // CASE: inflation in progress - inflating over a stack-lock. 1427 // Some other thread is converting from stack-locked to inflated. 1428 // Only that thread can complete inflation -- other threads must wait. 1429 // The INFLATING value is transient. 1430 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. 1431 // We could always eliminate polling by parking the thread on some auxiliary list. 1432 if (mark == markOopDesc::INFLATING()) { 1433 TEVENT(Inflate: spin while INFLATING); 1434 ReadStableMark(object); 1435 continue; 1436 } 1437 1438 // CASE: stack-locked 1439 // Could be stack-locked either by this thread or by some other thread. 1440 // 1441 // Note that we allocate the objectmonitor speculatively, _before_ attempting 1442 // to install INFLATING into the mark word. We originally installed INFLATING, 1443 // allocated the objectmonitor, and then finally STed the address of the 1444 // objectmonitor into the mark. This was correct, but artificially lengthened 1445 // the interval in which INFLATED appeared in the mark, thus increasing 1446 // the odds of inflation contention. 1447 // 1448 // We now use per-thread private objectmonitor free lists. 1449 // These list are reprovisioned from the global free list outside the 1450 // critical INFLATING...ST interval. A thread can transfer 1451 // multiple objectmonitors en-mass from the global free list to its local free list. 1452 // This reduces coherency traffic and lock contention on the global free list. 1453 // Using such local free lists, it doesn't matter if the omAlloc() call appears 1454 // before or after the CAS(INFLATING) operation. 1455 // See the comments in omAlloc(). 1456 1457 if (mark->has_locker()) { 1458 ObjectMonitor * m = omAlloc(Self); 1459 // Optimistically prepare the objectmonitor - anticipate successful CAS 1460 // We do this before the CAS in order to minimize the length of time 1461 // in which INFLATING appears in the mark. 1462 m->Recycle(); 1463 m->_Responsible = NULL; 1464 m->_recursions = 0; 1465 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class 1466 1467 markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark); 1468 if (cmp != mark) { 1469 omRelease(Self, m, true); 1470 continue; // Interference -- just retry 1471 } 1472 1473 // We've successfully installed INFLATING (0) into the mark-word. 1474 // This is the only case where 0 will appear in a mark-word. 1475 // Only the singular thread that successfully swings the mark-word 1476 // to 0 can perform (or more precisely, complete) inflation. 1477 // 1478 // Why do we CAS a 0 into the mark-word instead of just CASing the 1479 // mark-word from the stack-locked value directly to the new inflated state? 1480 // Consider what happens when a thread unlocks a stack-locked object. 1481 // It attempts to use CAS to swing the displaced header value from the 1482 // on-stack basiclock back into the object header. Recall also that the 1483 // header value (hashcode, etc) can reside in (a) the object header, or 1484 // (b) a displaced header associated with the stack-lock, or (c) a displaced 1485 // header in an objectMonitor. The inflate() routine must copy the header 1486 // value from the basiclock on the owner's stack to the objectMonitor, all 1487 // the while preserving the hashCode stability invariants. If the owner 1488 // decides to release the lock while the value is 0, the unlock will fail 1489 // and control will eventually pass from slow_exit() to inflate. The owner 1490 // will then spin, waiting for the 0 value to disappear. Put another way, 1491 // the 0 causes the owner to stall if the owner happens to try to 1492 // drop the lock (restoring the header from the basiclock to the object) 1493 // while inflation is in-progress. This protocol avoids races that might 1494 // would otherwise permit hashCode values to change or "flicker" for an object. 1495 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable. 1496 // 0 serves as a "BUSY" inflate-in-progress indicator. 1497 1498 1499 // fetch the displaced mark from the owner's stack. 1500 // The owner can't die or unwind past the lock while our INFLATING 1501 // object is in the mark. Furthermore the owner can't complete 1502 // an unlock on the object, either. 1503 markOop dmw = mark->displaced_mark_helper(); 1504 assert(dmw->is_neutral(), "invariant"); 1505 1506 // Setup monitor fields to proper values -- prepare the monitor 1507 m->set_header(dmw); 1508 1509 // Optimization: if the mark->locker stack address is associated 1510 // with this thread we could simply set m->_owner = Self. 1511 // Note that a thread can inflate an object 1512 // that it has stack-locked -- as might happen in wait() -- directly 1513 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. 1514 m->set_owner(mark->locker()); 1515 m->set_object(object); 1516 // TODO-FIXME: assert BasicLock->dhw != 0. 1517 1518 // Must preserve store ordering. The monitor state must 1519 // be stable at the time of publishing the monitor address. 1520 guarantee(object->mark() == markOopDesc::INFLATING(), "invariant"); 1521 object->release_set_mark(markOopDesc::encode(m)); 1522 1523 // Hopefully the performance counters are allocated on distinct cache lines 1524 // to avoid false sharing on MP systems ... 1525 OM_PERFDATA_OP(Inflations, inc()); 1526 TEVENT(Inflate: overwrite stacklock); 1527 if (log_is_enabled(Debug, monitorinflation)) { 1528 if (object->is_instance()) { 1529 ResourceMark rm; 1530 log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 1531 p2i(object), p2i(object->mark()), 1532 object->klass()->external_name()); 1533 } 1534 } 1535 TRACE_ONLY(post_monitor_inflate_event(&event, object, cause);) 1536 return m; 1537 } 1538 1539 // CASE: neutral 1540 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. 1541 // If we know we're inflating for entry it's better to inflate by swinging a 1542 // pre-locked objectMonitor pointer into the object header. A successful 1543 // CAS inflates the object *and* confers ownership to the inflating thread. 1544 // In the current implementation we use a 2-step mechanism where we CAS() 1545 // to inflate and then CAS() again to try to swing _owner from NULL to Self. 1546 // An inflateTry() method that we could call from fast_enter() and slow_enter() 1547 // would be useful. 1548 1549 assert(mark->is_neutral(), "invariant"); 1550 ObjectMonitor * m = omAlloc(Self); 1551 // prepare m for installation - set monitor to initial state 1552 m->Recycle(); 1553 m->set_header(mark); 1554 m->set_owner(NULL); 1555 m->set_object(object); 1556 m->_recursions = 0; 1557 m->_Responsible = NULL; 1558 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class 1559 1560 if (Atomic::cmpxchg_ptr (markOopDesc::encode(m), object->mark_addr(), mark) != mark) { 1561 m->set_object(NULL); 1562 m->set_owner(NULL); 1563 m->Recycle(); 1564 omRelease(Self, m, true); 1565 m = NULL; 1566 continue; 1567 // interference - the markword changed - just retry. 1568 // The state-transitions are one-way, so there's no chance of 1569 // live-lock -- "Inflated" is an absorbing state. 1570 } 1571 1572 // Hopefully the performance counters are allocated on distinct 1573 // cache lines to avoid false sharing on MP systems ... 1574 OM_PERFDATA_OP(Inflations, inc()); 1575 TEVENT(Inflate: overwrite neutral); 1576 if (log_is_enabled(Debug, monitorinflation)) { 1577 if (object->is_instance()) { 1578 ResourceMark rm; 1579 log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 1580 p2i(object), p2i(object->mark()), 1581 object->klass()->external_name()); 1582 } 1583 } 1584 TRACE_ONLY(post_monitor_inflate_event(&event, object, cause);) 1585 return m; 1586 } 1587 } 1588 1589 1590 // Deflate_idle_monitors() is called at all safepoints, immediately 1591 // after all mutators are stopped, but before any objects have moved. 1592 // It traverses the list of known monitors, deflating where possible. 1593 // The scavenged monitor are returned to the monitor free list. 1594 // 1595 // Beware that we scavenge at *every* stop-the-world point. 1596 // Having a large number of monitors in-circulation negatively 1597 // impacts the performance of some applications (e.g., PointBase). 1598 // Broadly, we want to minimize the # of monitors in circulation. 1599 // 1600 // We have added a flag, MonitorInUseLists, which creates a list 1601 // of active monitors for each thread. deflate_idle_monitors() 1602 // only scans the per-thread in-use lists. omAlloc() puts all 1603 // assigned monitors on the per-thread list. deflate_idle_monitors() 1604 // returns the non-busy monitors to the global free list. 1605 // When a thread dies, omFlush() adds the list of active monitors for 1606 // that thread to a global gOmInUseList acquiring the 1607 // global list lock. deflate_idle_monitors() acquires the global 1608 // list lock to scan for non-busy monitors to the global free list. 1609 // An alternative could have used a single global in-use list. The 1610 // downside would have been the additional cost of acquiring the global list lock 1611 // for every omAlloc(). 1612 // 1613 // Perversely, the heap size -- and thus the STW safepoint rate -- 1614 // typically drives the scavenge rate. Large heaps can mean infrequent GC, 1615 // which in turn can mean large(r) numbers of objectmonitors in circulation. 1616 // This is an unfortunate aspect of this design. 1617 1618 enum ManifestConstants { 1619 ClearResponsibleAtSTW = 0 1620 }; 1621 1622 // Deflate a single monitor if not in-use 1623 // Return true if deflated, false if in-use 1624 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, 1625 ObjectMonitor** freeHeadp, 1626 ObjectMonitor** freeTailp) { 1627 bool deflated; 1628 // Normal case ... The monitor is associated with obj. 1629 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant"); 1630 guarantee(mid == obj->mark()->monitor(), "invariant"); 1631 guarantee(mid->header()->is_neutral(), "invariant"); 1632 1633 if (mid->is_busy()) { 1634 if (ClearResponsibleAtSTW) mid->_Responsible = NULL; 1635 deflated = false; 1636 } else { 1637 // Deflate the monitor if it is no longer being used 1638 // It's idle - scavenge and return to the global free list 1639 // plain old deflation ... 1640 TEVENT(deflate_idle_monitors - scavenge1); 1641 if (log_is_enabled(Debug, monitorinflation)) { 1642 if (obj->is_instance()) { 1643 ResourceMark rm; 1644 log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , " 1645 "mark " INTPTR_FORMAT " , type %s", 1646 p2i(obj), p2i(obj->mark()), 1647 obj->klass()->external_name()); 1648 } 1649 } 1650 1651 // Restore the header back to obj 1652 obj->release_set_mark(mid->header()); 1653 mid->clear(); 1654 1655 assert(mid->object() == NULL, "invariant"); 1656 1657 // Move the object to the working free list defined by freeHeadp, freeTailp 1658 if (*freeHeadp == NULL) *freeHeadp = mid; 1659 if (*freeTailp != NULL) { 1660 ObjectMonitor * prevtail = *freeTailp; 1661 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); 1662 prevtail->FreeNext = mid; 1663 } 1664 *freeTailp = mid; 1665 deflated = true; 1666 } 1667 return deflated; 1668 } 1669 1670 // Walk a given monitor list, and deflate idle monitors 1671 // The given list could be a per-thread list or a global list 1672 // Caller acquires gListLock 1673 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp, 1674 ObjectMonitor** freeHeadp, 1675 ObjectMonitor** freeTailp) { 1676 ObjectMonitor* mid; 1677 ObjectMonitor* next; 1678 ObjectMonitor* cur_mid_in_use = NULL; 1679 int deflated_count = 0; 1680 1681 for (mid = *listHeadp; mid != NULL;) { 1682 oop obj = (oop) mid->object(); 1683 if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) { 1684 // if deflate_monitor succeeded, 1685 // extract from per-thread in-use list 1686 if (mid == *listHeadp) { 1687 *listHeadp = mid->FreeNext; 1688 } else if (cur_mid_in_use != NULL) { 1689 cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list 1690 } 1691 next = mid->FreeNext; 1692 mid->FreeNext = NULL; // This mid is current tail in the freeHeadp list 1693 mid = next; 1694 deflated_count++; 1695 } else { 1696 cur_mid_in_use = mid; 1697 mid = mid->FreeNext; 1698 } 1699 } 1700 return deflated_count; 1701 } 1702 1703 void ObjectSynchronizer::deflate_idle_monitors() { 1704 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1705 int nInuse = 0; // currently associated with objects 1706 int nInCirculation = 0; // extant 1707 int nScavenged = 0; // reclaimed 1708 bool deflated = false; 1709 1710 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors 1711 ObjectMonitor * freeTailp = NULL; 1712 1713 TEVENT(deflate_idle_monitors); 1714 // Prevent omFlush from changing mids in Thread dtor's during deflation 1715 // And in case the vm thread is acquiring a lock during a safepoint 1716 // See e.g. 6320749 1717 Thread::muxAcquire(&gListLock, "scavenge - return"); 1718 1719 if (MonitorInUseLists) { 1720 int inUse = 0; 1721 for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) { 1722 nInCirculation+= cur->omInUseCount; 1723 int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp); 1724 cur->omInUseCount-= deflated_count; 1725 if (ObjectMonitor::Knob_VerifyInUse) { 1726 verifyInUse(cur); 1727 } 1728 nScavenged += deflated_count; 1729 nInuse += cur->omInUseCount; 1730 } 1731 1732 // For moribund threads, scan gOmInUseList 1733 if (gOmInUseList) { 1734 nInCirculation += gOmInUseCount; 1735 int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp); 1736 gOmInUseCount-= deflated_count; 1737 nScavenged += deflated_count; 1738 nInuse += gOmInUseCount; 1739 } 1740 1741 } else { 1742 PaddedEnd<ObjectMonitor> * block = 1743 (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList); 1744 for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) { 1745 // Iterate over all extant monitors - Scavenge all idle monitors. 1746 assert(block->object() == CHAINMARKER, "must be a block header"); 1747 nInCirculation += _BLOCKSIZE; 1748 for (int i = 1; i < _BLOCKSIZE; i++) { 1749 ObjectMonitor* mid = (ObjectMonitor*)&block[i]; 1750 oop obj = (oop)mid->object(); 1751 1752 if (obj == NULL) { 1753 // The monitor is not associated with an object. 1754 // The monitor should either be a thread-specific private 1755 // free list or the global free list. 1756 // obj == NULL IMPLIES mid->is_busy() == 0 1757 guarantee(!mid->is_busy(), "invariant"); 1758 continue; 1759 } 1760 deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp); 1761 1762 if (deflated) { 1763 mid->FreeNext = NULL; 1764 nScavenged++; 1765 } else { 1766 nInuse++; 1767 } 1768 } 1769 } 1770 } 1771 1772 gMonitorFreeCount += nScavenged; 1773 1774 // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree. 1775 1776 if (ObjectMonitor::Knob_Verbose) { 1777 tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d " 1778 "ForceMonitorScavenge=%d : pop=%d free=%d", 1779 nInCirculation, nInuse, nScavenged, ForceMonitorScavenge, 1780 gMonitorPopulation, gMonitorFreeCount); 1781 tty->flush(); 1782 } 1783 1784 ForceMonitorScavenge = 0; // Reset 1785 1786 // Move the scavenged monitors back to the global free list. 1787 if (freeHeadp != NULL) { 1788 guarantee(freeTailp != NULL && nScavenged > 0, "invariant"); 1789 assert(freeTailp->FreeNext == NULL, "invariant"); 1790 // constant-time list splice - prepend scavenged segment to gFreeList 1791 freeTailp->FreeNext = gFreeList; 1792 gFreeList = freeHeadp; 1793 } 1794 Thread::muxRelease(&gListLock); 1795 1796 OM_PERFDATA_OP(Deflations, inc(nScavenged)); 1797 OM_PERFDATA_OP(MonExtant, set_value(nInCirculation)); 1798 1799 // TODO: Add objectMonitor leak detection. 1800 // Audit/inventory the objectMonitors -- make sure they're all accounted for. 1801 GVars.stwRandom = os::random(); 1802 GVars.stwCycle++; 1803 } 1804 1805 // Monitor cleanup on JavaThread::exit 1806 1807 // Iterate through monitor cache and attempt to release thread's monitors 1808 // Gives up on a particular monitor if an exception occurs, but continues 1809 // the overall iteration, swallowing the exception. 1810 class ReleaseJavaMonitorsClosure: public MonitorClosure { 1811 private: 1812 TRAPS; 1813 1814 public: 1815 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} 1816 void do_monitor(ObjectMonitor* mid) { 1817 if (mid->owner() == THREAD) { 1818 if (ObjectMonitor::Knob_VerifyMatch != 0) { 1819 ResourceMark rm; 1820 Handle obj(THREAD, (oop) mid->object()); 1821 tty->print("INFO: unexpected locked object:"); 1822 javaVFrame::print_locked_object_class_name(tty, obj, "locked"); 1823 fatal("exiting JavaThread=" INTPTR_FORMAT 1824 " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT, 1825 p2i(THREAD), p2i(mid)); 1826 } 1827 (void)mid->complete_exit(CHECK); 1828 } 1829 } 1830 }; 1831 1832 // Release all inflated monitors owned by THREAD. Lightweight monitors are 1833 // ignored. This is meant to be called during JNI thread detach which assumes 1834 // all remaining monitors are heavyweight. All exceptions are swallowed. 1835 // Scanning the extant monitor list can be time consuming. 1836 // A simple optimization is to add a per-thread flag that indicates a thread 1837 // called jni_monitorenter() during its lifetime. 1838 // 1839 // Instead of No_Savepoint_Verifier it might be cheaper to 1840 // use an idiom of the form: 1841 // auto int tmp = SafepointSynchronize::_safepoint_counter ; 1842 // <code that must not run at safepoint> 1843 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; 1844 // Since the tests are extremely cheap we could leave them enabled 1845 // for normal product builds. 1846 1847 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { 1848 assert(THREAD == JavaThread::current(), "must be current Java thread"); 1849 NoSafepointVerifier nsv; 1850 ReleaseJavaMonitorsClosure rjmc(THREAD); 1851 Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread"); 1852 ObjectSynchronizer::monitors_iterate(&rjmc); 1853 Thread::muxRelease(&gListLock); 1854 THREAD->clear_pending_exception(); 1855 } 1856 1857 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) { 1858 switch (cause) { 1859 case inflate_cause_vm_internal: return "VM Internal"; 1860 case inflate_cause_monitor_enter: return "Monitor Enter"; 1861 case inflate_cause_wait: return "Monitor Wait"; 1862 case inflate_cause_notify: return "Monitor Notify"; 1863 case inflate_cause_hash_code: return "Monitor Hash Code"; 1864 case inflate_cause_jni_enter: return "JNI Monitor Enter"; 1865 case inflate_cause_jni_exit: return "JNI Monitor Exit"; 1866 default: 1867 ShouldNotReachHere(); 1868 } 1869 return "Unknown"; 1870 } 1871 1872 //------------------------------------------------------------------------------ 1873 // Debugging code 1874 1875 void ObjectSynchronizer::sanity_checks(const bool verbose, 1876 const uint cache_line_size, 1877 int *error_cnt_ptr, 1878 int *warning_cnt_ptr) { 1879 u_char *addr_begin = (u_char*)&GVars; 1880 u_char *addr_stwRandom = (u_char*)&GVars.stwRandom; 1881 u_char *addr_hcSequence = (u_char*)&GVars.hcSequence; 1882 1883 if (verbose) { 1884 tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT, 1885 sizeof(SharedGlobals)); 1886 } 1887 1888 uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin); 1889 if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom); 1890 1891 uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin); 1892 if (verbose) { 1893 tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence); 1894 } 1895 1896 if (cache_line_size != 0) { 1897 // We were able to determine the L1 data cache line size so 1898 // do some cache line specific sanity checks 1899 1900 if (offset_stwRandom < cache_line_size) { 1901 tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer " 1902 "to the struct beginning than a cache line which permits " 1903 "false sharing."); 1904 (*warning_cnt_ptr)++; 1905 } 1906 1907 if ((offset_hcSequence - offset_stwRandom) < cache_line_size) { 1908 tty->print_cr("WARNING: the SharedGlobals.stwRandom and " 1909 "SharedGlobals.hcSequence fields are closer than a cache " 1910 "line which permits false sharing."); 1911 (*warning_cnt_ptr)++; 1912 } 1913 1914 if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) { 1915 tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer " 1916 "to the struct end than a cache line which permits false " 1917 "sharing."); 1918 (*warning_cnt_ptr)++; 1919 } 1920 } 1921 } 1922 1923 #ifndef PRODUCT 1924 1925 // Check if monitor belongs to the monitor cache 1926 // The list is grow-only so it's *relatively* safe to traverse 1927 // the list of extant blocks without taking a lock. 1928 1929 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { 1930 PaddedEnd<ObjectMonitor> * block = 1931 (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList); 1932 while (block != NULL) { 1933 assert(block->object() == CHAINMARKER, "must be a block header"); 1934 if (monitor > (ObjectMonitor *)&block[0] && 1935 monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) { 1936 address mon = (address)monitor; 1937 address blk = (address)block; 1938 size_t diff = mon - blk; 1939 assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned"); 1940 return 1; 1941 } 1942 block = (PaddedEnd<ObjectMonitor> *)block->FreeNext; 1943 } 1944 return 0; 1945 } 1946 1947 #endif