/*
 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

#if defined(__GNUC__) && !defined(PPC64)
// Need to inhibit inlining for older versions of GCC to avoid build-time failures
#define NOINLINE __attribute__((noinline))
#else
#define NOINLINE
#endif

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

// Keep the parameter order consistent with the enabled variants above.
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden.  Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
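//
// As an illustrative sketch (not the actual runtime entry points), a caller
// honoring the true/false protocol above looks roughly like:
//
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // transition away from _thread_in_Java, then take the slow path,
//     // which may block and may reach a safepoint
//     ... slow-path notify ...
//   }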

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * Lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter.  The interpreter and compiler use
// some assembly copies of this code.  Make sure to update that code
// if the following function is changed.  The implementation is
// extremely sensitive to race conditions.  Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  assert(!object->mark()->has_bias_pattern(), "should not see bias pattern here");
  // If the displaced header is NULL, the previous enter was a recursive
  // enter and this exit is a no-op.
  markOop dhw = lock->displaced_header();
  markOop mark;
  if (dhw == NULL) {
    // Recursive stack-lock.
    // Diagnostics -- Could be: stack-locked, inflating, inflated.
    mark = object->mark();
    assert(!mark->is_neutral(), "invariant");
    if (mark->has_locker() && mark != markOopDesc::INFLATING()) {
      assert(THREAD->is_lock_owned((address)mark->locker()), "invariant");
    }
    if (mark->has_monitor()) {
      ObjectMonitor * m = mark->monitor();
      assert(((oop)(m->object()))->mark() == mark, "invariant");
      assert(m->is_entered(THREAD), "invariant");
    }
    return;
  }

  mark = object->mark();

  // If the object is stack-locked by the current thread, try to
  // swing the displaced header from the box back to the mark.
  if (mark == (markOop) lock) {
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stacklock);
      return;
    }
  }

  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}
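
// For reference (a summary of existing conventions, not a new protocol):
// the low-order mark word bits distinguish the lock states used in this file:
//   [ptr            | 00]  stack-locked: ptr points to the BasicLock (box)
//                          on the owner's stack
//   [header         | 01]  neutral (unlocked): header holds hash/age bits
//   [ptr            | 10]  inflated: ptr points to the ObjectMonitor
//   [ptr            | 11]  reserved for the GC "marked" state
//   [JavaThread*... |101]  biased toward the indicated thread
//   all-zero               INFLATING: transient busy indicator (see inflate())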

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine is used to handle interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.  Simply using the heavy-weight
// monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
// (sketched as code below)
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
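//
// Rendered as code, with hypothetical handles lock1 and lock2 (this exact
// sequence does not appear elsewhere in the VM):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // 1
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                       // 2, 3
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                  // 4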
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor.  Note:  can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
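
// ObjectLocker is used RAII-style; an illustrative (hypothetical) caller:
//
//   {
//     ObjectLocker ol(h_obj, THREAD);  // fast_enter() in the constructor
//     ... touch state guarded by h_obj's monitor ...
//   }                                  // fast_exit() in the destructor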

// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG output.
    // On MP systems we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor.  We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY cases. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert(temp->is_neutral(), "invariant");
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code.
      // If someone adds a new usage of the displaced header,
      // please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method could revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.
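//
// Sketched as a transition diagram (illustrative; when MonitorInUseLists is
// set, assigned monitors also appear on a per-thread omInUseList):
//
//   gFreeList --omAlloc--> thread's omFreeList --inflate--> object's mark
//       ^                                                       |
//       +---------- deflate_idle_monitors (at STW) <------------+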


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop.  In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread, and has a lifespan longer than
    // that of this activation record.  The VMThread will delete the op
    // when completed.
    VMThread::execute(new VM_ForceAsyncSafepoint());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // initialize the linked list, each monitor points to its next
    // forming the single linked free list, the very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store_ptr(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}
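
// The resulting block layout (illustrative):
//
//   gBlockList --> [0]: object == CHAINMARKER, FreeNext --> next block's [0]
//                  [1] --> [2] --> ... --> [_BLOCKSIZE-1] --> old gFreeList
//
// Element [0] never backs a Java object; it serves only as blocklist
// linkage, while elements [1.._BLOCKSIZE-1] are spliced onto gFreeList.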

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed.  This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from the Thread:: dtor _after the thread
// has been excised from the thread list and is no longer a mutator.
// That means that omFlush() can run concurrently with a safepoint and
// the scavenge operator.  Calling omFlush() from JavaThread::exit() might
// be a better choice as we could safely reason that the JVM is
// not at a safepoint at the time of the call, and thus there could
// be no inopportune interleavings between omFlush() and the scavenge
// operator.

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}

ObjectMonitor * NOINLINE ObjectSynchronizer::inflate(Thread * Self,
                                                     oop object,
                                                     const InflateCause cause) {

  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en-mass from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that might
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
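      //
      // An illustrative interleaving of the protocol just described
      // (T1 holds the stack-lock, T2 inflates):
      //
      //   T2: CASes the mark from (T1's BasicLock*, tag 00) to 0 (INFLATING)
      //   T1: fast_exit()'s CAS back to the displaced header fails (mark == 0),
      //       so T1 falls into inflate() and waits in ReadStableMark()
      //   T2: copies the displaced header into m->header(), sets owner/object,
      //       then publishes with release_set_mark(markOopDesc::encode(m))
      //   T1: observes the inflated mark and completes its unlock via m->exit()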
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up the monitor fields to proper values -- prepare the monitor.
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.
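    //
    // A hypothetical inflateTry() along those lines might look roughly like
    // the following (a sketch only -- no such method exists in this file):
    //   ObjectMonitor * m = omAlloc(Self);
    //   m->Recycle();
    //   m->set_header(mark);    // mark must be neutral
    //   m->set_owner(Self);     // pre-locked on behalf of the caller
    //   m->set_object(object);
    //   if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) == mark) {
    //     return m;             // a single CAS both inflates and confers ownership
    //   }
    //   // interference: release m, then retry or fall back to the 2-step path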

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions = 0;
    m->_Responsible = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(event, object, cause);
    }
    return m;
  }
}


// deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// then only scans the per-thread in-use lists; omAlloc() puts all
// assigned monitors on the per-thread list, and deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds that thread's list of active
// monitors to the global gOmInUseList while holding the global list
// lock. deflate_idle_monitors() acquires the global list lock to scan
// gOmInUseList and move non-busy monitors to the global free list; the
// overall flow is sketched below.
// An alternative would have been a single global in-use list; the
// downside would have been the additional cost of acquiring the global
// list lock for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
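//
// Rough life cycle under MonitorInUseLists (an illustrative sketch of the
// existing machinery in this file, not a new mechanism):
//   omAlloc(Self)               monitor is put on Self's omInUseList
//   thread exit: omFlush(Self)  omInUseList is spliced onto gOmInUseList
//   safepoint: deflate_idle_monitors()
//     deflate_monitor_list()    walks the per-thread lists and gOmInUseList
//     idle monitors             are spliced back onto gFreeList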

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};

// Deflate a single monitor if not in-use.
// Returns true if deflated, false if in-use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // The monitor is no longer being used.
    // It's idle - scavenge it and return it to the global free list:
    // plain old deflation.
    TEVENT(deflate_idle_monitors - scavenge1);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the deflated monitor to the working free list defined by
    // freeHeadp and freeTailp.
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// The caller acquires gListLock.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // deflate_monitor succeeded, so
      // extract mid from the in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL; // This mid is the current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
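
// Illustrative call pattern for deflate_monitor_list() (a sketch; the real
// call sites are in deflate_idle_monitors() below, with gListLock held):
//   ObjectMonitor * freeHeadp = NULL;  // local SLL of scavenged monitors
//   ObjectMonitor * freeTailp = NULL;
//   int deflated_count = deflate_monitor_list(cur->omInUseList_addr(),
//                                             &freeHeadp, &freeTailp);
//   cur->omInUseCount -= deflated_count;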

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;         // currently associated with objects
  int nInCirculation = 0; // extant
  int nScavenged = 0;     // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block =
      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should be on either a thread-specific private
          // free list or on the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          nScavenged++;
        } else {
          nInuse++;
        }
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and the list agree.

  if (ObjectMonitor::Knob_Verbose) {
    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
                  "ForceMonitorScavenge=%d : pop=%d free=%d",
                  nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
                  gMonitorPopulation, gMonitorFreeCount);
    tty->flush();
  }

  ForceMonitorScavenge = 0; // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend the scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  OM_PERFDATA_OP(Deflations, inc(nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
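//
// For example, the closure is driven like this (see
// release_monitors_owned_by_thread() below):
//   ReleaseJavaMonitorsClosure rjmc(THREAD);
//   ObjectSynchronizer::monitors_iterate(&rjmc); // under gListLock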
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      if (ObjectMonitor::Knob_VerifyMatch != 0) {
        Handle obj((oop) mid->object());
        tty->print("INFO: unexpected locked object:");
        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
        fatal("exiting JavaThread=" INTPTR_FORMAT
              " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
              p2i(THREAD), p2i(mid));
      }
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates whether
// the thread called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:   return "VM Internal";
    case inflate_cause_monitor_enter: return "Monitor Enter";
    case inflate_cause_wait:          return "Monitor Wait";
    case inflate_cause_notify:        return "Monitor Notify";
    case inflate_cause_hash_code:     return "Monitor Hash Code";
    case inflate_cause_jni_enter:     return "JNI Monitor Enter";
    case inflate_cause_jni_exit:      return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

static void post_monitor_inflate_event(EventJavaMonitorInflate& event,
                                       const oop obj,
                                       const ObjectSynchronizer::InflateCause cause) {
#if INCLUDE_TRACE
  assert(event.should_commit(), "check outside");
  event.set_klass(obj->klass());
  event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
  event.set_cause((u1)cause);
  event.commit();
#endif
}

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(_hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size, so
    // do some cache-line-specific sanity checks.

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Verify all monitors in the monitor cache; the verification is weak.
void ObjectSynchronizer::verify() {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        mid->verify();
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Check whether a monitor belongs to the monitor cache.
// The list is grow-only, so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.
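//
// Layout sketch for the range check below (illustrative; omAlloc() carves
// each block roughly like this):
//   gBlockList -> PaddedEnd<ObjectMonitor> block[_BLOCKSIZE]
//     block[0]                  chain header (object() == CHAINMARKER)
//     block[1 .. _BLOCKSIZE-1]  allocatable monitors
// A pooled monitor therefore lies strictly between &block[0] and
// &block[_BLOCKSIZE], at an offset that is a multiple of
// sizeof(PaddedEnd<ObjectMonitor>).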

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif