/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
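//
// Usage shape (illustrative; these exact invocations appear at the wait()
// and dtrace_waited_probe() sites later in this file):
//
//   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
//   DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);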

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
#define CLAIMEDMARKER (cast_to_oop<intptr_t>(-2))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
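
// Illustrative caller shape (a sketch only, not the actual runtime entry
// points, which live in the interpreter/compiler runtime stubs): callers are
// expected to try the quick_* form first and fall into the ordinary path
// only on a false return, e.g.:
//
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // ... transition out of _thread_in_Java, then ...
//     ObjectSynchronizer::notify(Handle(self, obj), CHECK);  // slow path
//   }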

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      TEVENT(fast_exit: release stack-lock);
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

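// Illustrative example (comments only; Java-level view) of the dhw == NULL
// recursive-enter case handled in fast_exit() above:
//
//   synchronized (o) {      // outermost lock: BasicLock holds o's old header
//     synchronized (o) {    // nested lock: displaced header set to NULL
//       ...
//     }                     // fast_exit sees dhw == NULL -> diagnostics only
//   }                       // fast_exit CASes the saved header back into o
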
// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy
// weight monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

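// A highly schematic sketch of the protocol above from a caller's
// perspective (hypothetical caller; lock1/lock2 are placeholders -- see the
// numbered steps in the comment block preceding complete_exit()):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD); // step 1
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                      // steps 2-3
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                 // step 4
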
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
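  // (For context: the CHECK variant expands to THREAD plus an early return
  // when an exception is pending, which would skip the exit below -- hence
  // the explicit check(THREAD) form here.)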
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment for notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

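// Illustrative example (comments only) of the stack-locked short-circuit in
// notify()/notifyall() above: if the receiver is still stack-locked by the
// calling thread, no thread can ever have wait()ed on it (wait() inflates),
// so the waitset is empty and the notify is a no-op:
//
//   synchronized (o) {   // o stays stack-locked: uncontended, never waited on
//     o.notify();        // has_locker() && owned by us -> return, no inflation
//   }
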
// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG
    // output. On an MP system we'll have lots of RW access to a global, so
    // the mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

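// Worked trace of the Marsaglia xor-shift branch above (made-up seed values,
// purely illustrative of how the four words of state rotate):
//
//   X=1, Y=2, Z=3, W=4
//   t = X ^ (X << 11)                       // = 1 ^ 2048 = 2049
//   X <- Y, Y <- Z, Z <- W                  // state shifts down
//   W = (W ^ (W >> 19)) ^ (t ^ (t >> 8))    // = 4 ^ (2049 ^ 8) = 2061
//   value = W                               // hash before masking
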
intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

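// Illustrative monitors_iterate() client (a hypothetical closure, not
// defined in this file; MonitorClosure comes from the shared iterator
// interfaces):
//
//   class CountActiveMonitors : public MonitorClosure {
//    public:
//     int _count;
//     CountActiveMonitors() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) { _count++; }
//   };
//
//   CountActiveMonitors cam;
//   ObjectSynchronizer::monitors_iterate(&cam);
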
// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER || block->object() == CLAIMEDMARKER, "must be a valid block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER || block->object() == CLAIMEDMARKER, "must be a valid block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object. The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

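// Example of the back-pressure arithmetic (the numbers here are made up):
// with -XX:MonitorBound=10000, omAlloc() calls InduceScavenge() once
// (gMonitorPopulation - gMonitorFreeCount) -- i.e., the number of monitors
// currently assigned to objects -- exceeds 10000; the scavenge then runs at
// the induced safepoint via deflate_idle_monitors().
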
static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record. The VMThread will delete the op when
    // completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store_ptr(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator. Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {

  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this
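    //
    // For reference, the (assumed) low-order mark word encodings behind
    // these states -- see markOop.hpp for the authoritative table:
    //   neutral:      [header               | 0 | 01]
    //   stack-locked: [ptr to BasicLock         | 00]
    //   INFLATING:    0 (all zero, transient)
    //   inflated:     [ptr to ObjectMonitor     | 10]
    //   biased:       [JavaThread* | epoch | age | 1 | 01]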

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATED appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that might
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
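    // A hedged sketch of the inflateTry() proposed above -- the name and shape
    // come from the TODO, not from existing code.  On success a single CAS both
    // inflates the object and makes Self the owner:
    //
    //   ObjectMonitor* inflateTry(Thread* Self, oop obj, markOop mark) {
    //     assert(mark->is_neutral(), "only for the neutral case");
    //     ObjectMonitor* m = omAlloc(Self);
    //     m->Recycle();
    //     m->set_header(mark);     // displaced header is the neutral mark
    //     m->set_owner(Self);      // pre-locked: a winning CAS confers ownership
    //     m->set_object(obj);
    //     if (obj->cas_set_mark(markOopDesc::encode(m), mark) == mark) {
    //       return m;              // inflated *and* entered in one step
    //     }
    //     m->set_object(NULL); m->set_owner(NULL);
    //     omRelease(Self, m, true);
    //     return NULL;             // interference; caller retries or slow-paths
    //   }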

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(event, object, cause);
    }
    return m;
  }
}


// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList for non-busy monitors, moving them
// to the global free list as well.
// An alternative could have used a single global in-use list.  The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
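  // The guarantees below spell out the invariant for an inflated object:
  // obj's mark word encodes mid, decoding that mark yields mid again, and
  // the displaced header stashed in mid is a neutral (unlocked) mark word.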
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the deflated monitor to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors
// The given list could be a per-thread list or a global list
// Caller acquires gListLock
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;              // currently associated with objects
  int nInCirculation = 0;      // extant
  int nScavenged = 0;          // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtor's during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block =
      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should either be on a thread-specific private
          // free list or on the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          nScavenged++;
        } else {
          nInuse++;
        }
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
                  "ForceMonitorScavenge=%d : pop=%d free=%d",
                  nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
                  gMonitorPopulation, gMonitorFreeCount);
    tty->flush();
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  OM_PERFDATA_OP(Deflations, inc(nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}
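// A minimal, self-contained sketch (hypothetical types, not HotSpot code) of
// the per-thread list protocol described above deflate_idle_monitors():
// omAlloc()-style allocation links monitors onto a thread-local list with no
// locking, and a dying thread splices that list onto the shared list under a
// single lock acquisition, just as omFlush() and the constant-time splice at
// the end of deflate_idle_monitors() do:
//
//   #include <mutex>
//
//   struct Mon    { Mon* next = nullptr; };
//   struct Shared { std::mutex lock; Mon* head = nullptr; };
//
//   void flush_to_global(Mon* local_head, Shared& g) {
//     if (local_head == nullptr) return;
//     Mon* tail = local_head;
//     while (tail->next != nullptr) tail = tail->next;  // one pass to find tail
//     std::lock_guard<std::mutex> guard(g.lock);
//     tail->next = g.head;                              // constant-time splice
//     g.head = local_head;
//   }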

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      if (ObjectMonitor::Knob_VerifyMatch != 0) {
        ResourceMark rm;
        Handle obj((oop) mid->object());
        tty->print("INFO: unexpected locked object:");
        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
        fatal("exiting JavaThread=" INTPTR_FORMAT
              " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
              p2i(THREAD), p2i(mid));
      }
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

static void post_monitor_inflate_event(EventJavaMonitorInflate& event,
                                       const oop obj,
                                       const ObjectSynchronizer::InflateCause cause) {
#if INCLUDE_TRACE
  assert(event.should_commit(), "check outside");
  event.set_monitorClass(obj->klass());
  event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
  event.set_cause((u1)cause);
  event.commit();
#endif
}

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }
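  // The offsets computed below are byte distances from the start of
  // SharedGlobals; the cache-line checks further down compare them
  // against the runtime-detected L1 data cache line size.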
  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size, so
    // do some cache-line-specific sanity checks.

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache, the verification is weak.
void ObjectSynchronizer::verify() {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif


ParallelObjectSynchronizerIterator ObjectSynchronizer::parallel_iterator() {
  return ParallelObjectSynchronizerIterator(gBlockList);
}

// ParallelObjectSynchronizerIterator implementation
ParallelObjectSynchronizerIterator::ParallelObjectSynchronizerIterator(ObjectMonitor * head)
  : _head(head), _cur(head) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
}

ParallelObjectSynchronizerIterator::~ParallelObjectSynchronizerIterator() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor>* block = (PaddedEnd<ObjectMonitor>*)_head;
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CLAIMEDMARKER, "must be a claimed block");
    // Restore chainmarker
    block->set_object(CHAINMARKER);
  }
}

void* ParallelObjectSynchronizerIterator::claim() {
  PaddedEnd<ObjectMonitor>* my_cur = (PaddedEnd<ObjectMonitor>*)_cur;
  PaddedEnd<ObjectMonitor>* next_block;

  while (true) {
    if (my_cur == NULL) return NULL;

    if (my_cur->object() == CHAINMARKER) {
      if (my_cur->cas_set_object(CLAIMEDMARKER, CHAINMARKER) == CHAINMARKER) {
        return (void*)my_cur;
      }
    } else {
      assert(my_cur->object() == CLAIMEDMARKER, "must be claimed");
    }

    next_block = (PaddedEnd<ObjectMonitor> *)next(my_cur);
    my_cur = (PaddedEnd<ObjectMonitor> *)Atomic::cmpxchg_ptr(next_block, &_cur, my_cur);
  }
}

bool ParallelObjectSynchronizerIterator::parallel_oops_do(OopClosure* f) {
  PaddedEnd<ObjectMonitor>* block = (PaddedEnd<ObjectMonitor>*)claim();
  if (block != NULL) {
    assert(block->object() == CLAIMEDMARKER, "must be a claimed block");
    for (int i = 1; i < ObjectSynchronizer::_BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
    return true;
  }
  return false;
}
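
// A hedged usage sketch: the GC-worker context below is assumed, not part of
// this file.  Each worker repeatedly claims whole blocks until claim() returns
// NULL, so the block list is partitioned dynamically across workers:
//
//   void worker_scan(ParallelObjectSynchronizerIterator& it, OopClosure* cl) {
//     while (it.parallel_oops_do(cl)) {
//       // each successful call scanned one CAS-claimed block of monitors
//     }
//   }
//
// The CAS on the block header's object field (CHAINMARKER -> CLAIMEDMARKER)
// is what makes a claim exclusive; the destructor restores CHAINMARKER once
// every worker is done, which is why it must also run at the safepoint.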