/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

// Fallback argument lists kept in the same order as the real macros above.
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
// gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
// want to expose the PaddedEnd template more than necessary.
ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepoint = NULL;
ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepointTail = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

bool ObjectSynchronizer::_should_deflate_idle_monitors_conc = false;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static int gMonitorFreeCountNextSafepoint = 0;
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance.  In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation.  In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
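
// A minimal sketch (not HotSpot code) of the caller contract described
// above, with a hypothetical slow_notify() standing in for the real
// slow-path entry point:
//
//   if (!ObjectSynchronizer::quick_notify(obj, self, all)) {
//     // Transition _thread_in_Java -> _thread_in_vm, then run the full
//     // slow path, which may block and may reach a safepoint.
//     slow_notify(obj, self, all);   // hypothetical fallback
//   }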

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL &&
        Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// some assembly copies of this code. Make sure to update that code
// if the following function is changed. The implementation is
// extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
      TEVENT(fast_exit: release stack-lock);
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}
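
// For orientation, a sketch of the stack-lock state that fast_enter()
// establishes and fast_exit() unwinds (distilled from the code above, not
// an authoritative diagram):
//
//   object->mark()           --> BasicLock* on the owner's stack (stack-locked)
//   lock->displaced_header() --> the saved neutral mark word (hash, age, ...)
//   lock->displaced_header() == NULL  marks a recursive enter
//
// fast_exit() undoes the outermost lock by CASing the displaced header
// back into the object's mark word.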

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  do {
    markOop mark = obj->mark();
    assert(!mark->has_bias_pattern(), "should not see bias pattern here");

    if (mark->is_neutral()) {
      // Anticipate successful CAS -- the ST of the displaced mark must
      // be visible <= the ST performed by the CAS.
      lock->set_displaced_header(mark);
      if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
        TEVENT(slow_enter: release stacklock);
        return;
      }
      // Fall through to inflate() ...
    } else if (mark->has_locker() &&
               THREAD->is_lock_owned((address)mark->locker())) {
      assert(lock != mark->locker(), "must not re-lock the same lock");
      assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
      lock->set_displaced_header(NULL);
      return;
    }

    // The object header will never be displaced to this lock,
    // so it does not matter what the value is, except that it
    // must be non-zero to avoid looking like a re-entrant lock,
    // and must not look locked either.
    lock->set_displaced_header(markOopDesc::unused_mark());
  } while (!ObjectSynchronizer::inflate(THREAD,
                                        obj(),
                                        inflate_cause_monitor_enter)->enter(THREAD));
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy
// weight monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
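//
// A rough sketch (not actual HotSpot code) of the numbered protocol above,
// assuming obj1 (the outer lock, currently held) and obj2 (the inner lock):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(obj1, THREAD); // (1)
//   // ... enter obj2, then:
//   ObjectSynchronizer::wait(obj2, 0, THREAD);                      // (2),(3)
//   // ... exit obj2, then:
//   ObjectSynchronizer::reenter(obj1, rec, THREAD);                 // (4)
//   // ... finally enter obj2 again                                 // (5)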
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor;
  do {
    monitor = ObjectSynchronizer::inflate(THREAD,
                                          obj(),
                                          inflate_cause_vm_internal);
  } while (!monitor->reenter(recursion, THREAD));
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  while (!ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD));
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
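
// A brief usage sketch for ObjectLocker (a generic illustration, not a
// specific call site):
//
//   {
//     ObjectLocker ol(h_obj, THREAD);     // fast_enter() in the constructor
//     // ... operate on state guarded by h_obj's monitor ...
//   }                                     // fast_exit() in the destructor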

// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending.  If so, immediately
    // yielding or blocking would be appropriate.  Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.  A more complete solution would require that the inflating
        // thread hold the associated inflation lock.  The following code simply restricts
        // the number of spinners to at most one.  We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.  Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword.  Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().  When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses an unguarded global Park-Miller RNG,
    // so it's possible for two threads to race and generate the same RNG.
    // On MP systems we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}
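
// For reference, the default branch above is Marsaglia's xorshift
// ("xor128") generator: state (x, y, z, w) advances as
//
//   t = x ^ (x << 11);   x = y;   y = z;   z = w;
//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//
// The per-thread state (_hashStateX.._hashStateW) is seeded when the
// Thread is constructed, so each thread produces an independent hash
// stream without any shared writes.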

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
 Retry:
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavy weight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral() || (temp->hash() == 0 && temp->is_marked()), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case. So we have
    // to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason
    // is that the BasicLock (stack slot) will be asynchronously
    // read by other threads during the inflate() function.
    // Any change to the stack may not propagate to other threads
    // correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral() || (mark->hash() == 0 && mark->is_marked()), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->set_unmarked()->copy_set_hash(hash); // merge hash code into header
    assert(temp->is_neutral(), "invariant");
    if (mark->is_marked()) {
      // Monitor is being deflated. Try installing the mark word with hash code into obj.
      markOop monitor_mark = markOopDesc::encode(monitor);
      if (obj->cas_set_mark(temp, monitor_mark) == monitor_mark) {
        return hash;
      } else {
        // Somebody else installed a new mark word in obj. Start over. We are making progress,
        // as the new mark word is not a pointer to the monitor.
        goto Retry;
      }
    }
    test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
    if (test != mark) {
      // The only updates to the header in the monitor (outside GC) are to
      // install the hash code or to mark the header to signal that the
      // monitor is being deflated. If someone adds a new usage of the
      // displaced header, please update this code.
      if (test->is_marked()) {
        // Monitor is being deflated. Make progress by starting over.
        assert(test->hash() == 0, "invariant");
        goto Retry;
      }
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}
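
// Summary of where FastHashCode() finds or installs the identity hash,
// keyed by the object's mark state (distilled from the code above):
//
//   neutral        : hash lives in the object's mark word; CAS installs it.
//   stack-locked   : hash lives in the displaced mark in the owner's
//                    BasicLock; never mutated in place -- inflate instead.
//   inflated       : hash lives in the ObjectMonitor's header field.
//   being deflated : the monitor's header is marked; restart at Retry.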

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(Handle h_obj, bool doLock) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(owner, doLock);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}
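
// A minimal sketch of a monitors_iterate() client, assuming the
// MonitorClosure interface (a do_monitor(ObjectMonitor*) callback)
// declared in the runtime headers:
//
//   class CountMonitors : public MonitorClosure {
//    public:
//     int _count;
//     CountMonitors() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) { _count++; }
//   };
//
//   CountMonitors cm;
//   ObjectSynchronizer::monitors_iterate(&cm);  // visits monitors with objects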

// Get the next block in the block list.
static inline ObjectMonitor* next(ObjectMonitor* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}


void ObjectSynchronizer::oops_do(OopClosure* f) {
  if (MonitorInUseLists) {
    // When using thread local monitor lists, we only scan the
    // global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  } else {
    global_oops_do(f);
  }
}

void ObjectSynchronizer::global_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->is_active()) {
        assert(mid->object() != NULL, "invariant");
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects.  Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.  Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock.  All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object.  The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only.  We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC.  As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit.  Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
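//
// Concretely, omAlloc() below evaluates the bound as
// (gMonitorPopulation - gMonitorFreeCount) > MonitorBound, i.e. it counts
// only monitors currently associated with objects. For example, with
// MonitorBound=100000, 120000 extant monitors and 30000 on the free lists,
// 90000 are in circulation and no scavenge is induced yet.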

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and have a lifespan longer than
    // that of this activation record. The VMThread will delete the op when completed.
    VMThread::execute(new VM_ScavengeMonitors());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
    guarantee(mid->is_active(), "invariant");
  }
  guarantee(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
    guarantee(mid->is_free(), "invariant");
  }
  guarantee(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs.  As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors.  Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      assert(!m->is_free(), "post-condition");
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        take->set_object(NULL);
        take->set_owner(NULL);
        take->_count = 0;
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        assert(take->is_free(), "invariant");
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_size_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_size_up((intptr_t)real_malloc_addr,
                           DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // initialize the linked list, each monitor points to its next
    // forming the single linked free list; the very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
      assert(temp[i].is_free(), "invariant");
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand.  This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store_ptr(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  m->set_allocation_state(ObjectMonitor::Free);
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list.  Typically a thread calls omFlush() when
// it's dying.  We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints.  Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the scavenge operator. In particular, this ensures that
// the thread's monitors are scanned by a GC safepoint, either via
// Thread::oops_do() (if the safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
    assert(Self->omFreeCount == tally, "free-count off");
    Self->omFreeCount = 0;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    markOop dmw = mark->monitor()->header();
    assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "monitor must record a good object header");
    if (dmw->is_neutral()) {
      return mark->monitor();
    }
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {
 Retry:
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // * Inflated     - just return
    // * Stack-locked - coerce it to inflated
    // * INFLATING    - busy wait for conversion to complete
    // * Neutral      - aggressively inflate the object.
    // * BIASED       - Illegal. We should never see this.

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      markOop dmw = inf->header();
      assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "invariant");
      if (dmw->is_marked()) {
        goto Retry;
      }
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().
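
    // A minimal sketch of that reprovisioning step, assuming the refill loop
    // in omAlloc() has roughly this shape (a batch splice from gFreeList to
    // the caller's private list, under gListLock):
    //
    //   Thread::muxAcquire(&gListLock, "omAlloc refill");
    //   for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
    //     gMonitorFreeCount--;
    //     ObjectMonitor * take = gFreeList;
    //     gFreeList = take->FreeNext;
    //     omRelease(Self, take, false);   // push onto Self->omFreeList
    //   }
    //   Thread::muxRelease(&gListLock);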
    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS.
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = (markOop) Atomic::cmpxchg_ptr(markOopDesc::INFLATING(), object->mark_addr(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header. Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor. The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate(). The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress. This protocol avoids races that might
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0, mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));
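
      // In outline, the stack-lock inflation protocol just completed is
      // (a condensed sketch of the code in this block):
      //
      //   1. CAS(object->mark_addr(), mark, INFLATING())  // claim exclusively
      //   2. dmw = mark->displaced_mark_helper();         // stable while mark == 0
      //      m->set_header(dmw);
      //      m->set_owner(mark->locker());
      //      m->set_object(object);
      //   3. object->release_set_mark(markOopDesc::encode(m));  // publish
      //
      // Any competing unlocker or inflater spins while the mark reads 0.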
      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (Atomic::cmpxchg_ptr(markOopDesc::encode(m), object->mark_addr(), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(event, object, cause);
    }
    return m;
  }
}

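// As the "CASE: neutral" comment above notes, entering via a neutral mark is
// currently a 2-step dance: one CAS to inflate, then one CAS on _owner. A
// condensed sketch of the caller's side (hypothetical glue; the real callers
// are fast_enter()/slow_enter() earlier in this file):
//
//   ObjectMonitor* m = inflate(Self, obj, inflate_cause_monitor_enter);
//   if (Atomic::cmpxchg_ptr(Self, &m->_owner, NULL) != NULL) {
//     m->enter(Self);   // lost the race for ownership; contend normally
//   }
//
// The proposed inflateTry() would fuse the two CASes into a single
// install-a-pre-locked-monitor CAS.
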
// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread. deflate_idle_monitors()
// only scans the per-thread in-use lists. omAlloc() puts all
// assigned monitors on the per-thread list. deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the
// global list lock. deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and move non-busy monitors to
// the global free list.
// An alternative could have used a single global in-use list. The
// downside would have been the additional cost of acquiring the global
// list lock for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};

void ObjectSynchronizer::do_safepoint_work() {
  if (MonitorInUseLists || !AsyncDeflateIdleMonitors) {
    ObjectSynchronizer::deflate_idle_monitors();
    return;
  }
  assert(AsyncDeflateIdleMonitors, "oops");
  if (gFreeListNextSafepoint != NULL) {
#ifdef ASSERT
    for (ObjectMonitor* monitor = gFreeListNextSafepoint; monitor != NULL; monitor = monitor->FreeNext) {
      guarantee(monitor->owner() == NULL, "invariant");
      guarantee(monitor->waiters() == 0, "invariant");
      guarantee(monitor->recursions() == 0, "invariant");
      guarantee(monitor->object() != NULL, "invariant");
      guarantee(monitor->header() != 0, "invariant");
      guarantee(monitor->is_free(), "invariant");
    }
    guarantee(gFreeListNextSafepointTail != NULL, "invariant");
#endif // def ASSERT

    Thread::muxAcquire(&gListLock, "do_safepoint_work");
    gFreeListNextSafepointTail->FreeNext = gFreeList;
    gFreeList = gFreeListNextSafepoint;
    gMonitorFreeCount += gMonitorFreeCountNextSafepoint;
    Thread::muxRelease(&gListLock);

    gFreeListNextSafepoint = NULL;
    gFreeListNextSafepointTail = NULL;
    gMonitorFreeCountNextSafepoint = 0;
  }
  set_should_deflate_idle_monitors_conc();
  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
  Service_lock->notify_all();
}

void ObjectSynchronizer::append_to_freelist_for_after_safepoint(int nScavenged, ObjectMonitor* const head, ObjectMonitor* const tail) {
#ifdef ASSERT
  int count = 0;
  for (ObjectMonitor* m = head; m != NULL; m = m->FreeNext) { count++; }
  guarantee(count == nScavenged, "invariant");
#endif // def ASSERT
  if (head != NULL) {
    assert(tail->FreeNext == NULL, "invariant");
    tail->FreeNext = gFreeListNextSafepoint;
    gFreeListNextSafepoint = head;
  }
  if (gFreeListNextSafepointTail == NULL) {
    gFreeListNextSafepointTail = tail;
  }
  gMonitorFreeCountNextSafepoint += nScavenged;
  OM_PERFDATA_OP(Deflations, inc(nScavenged));
}
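
// To summarize the async hand-off implemented above and in
// deflate_idle_monitors_conc() below (a sketch of the intended flow):
//
//   concurrent deflater:  try_disable_monitor() succeeds
//                         -> monitor staged on gFreeListNextSafepoint
//   next STW safepoint:   do_safepoint_work() splices the staged segment
//                         onto gFreeList (under gListLock) and notifies
//                         the Service_lock waiter
//   service thread:       wakes and runs another concurrent deflation cycle
//
// Staged monitors thus become allocatable only after a safepoint has
// intervened -- presumably the grace period that keeps threads still
// examining a just-disabled monitor safe.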

void ObjectSynchronizer::deflate_idle_monitors_conc() {
  assert(Thread::current()->is_Java_thread(), "precondition");
  _should_deflate_idle_monitors_conc = false;
  if (MonitorInUseLists) {
    return; // Don't want to run over the thread list for now.
  }

  ObjectMonitor* freeHeadp = NULL;
  ObjectMonitor* freeTailp = NULL;
  int nScavenged = 0;
  int nInuse = 0;
  int nInCirculation = 0;

  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
    // Iterate over all extant monitors - Scavenge all idle monitors.
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (SafepointSynchronize::is_synchronizing()) {
      // A safepoint is pending: publish what we've scavenged so far and
      // block, so the VM thread isn't held up by this scan.
      append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
      nScavenged = 0;
      freeHeadp = NULL;
      freeTailp = NULL;
      JavaThread* const jt = (JavaThread*) Thread::current();
      ThreadBlockInVM blocker(jt);
    }
    nInCirculation += _BLOCKSIZE;
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor*)&block[i];
      if (!mid->is_old()) {
        // Skip deflating newly allocated or free monitors.
        if (mid->is_new()) {
          // Mark mid as "old" so it becomes a deflation candidate next cycle.
          mid->set_allocation_state(ObjectMonitor::Old);
        }
        continue;
      }

      oop obj = (oop)mid->object();
      assert(obj != NULL, "invariant");

      if (mid->try_disable_monitor()) {
        mid->FreeNext = NULL;
        if (freeHeadp == NULL) { freeHeadp = mid; }
        if (freeTailp != NULL) { freeTailp->FreeNext = mid; }
        freeTailp = mid;
        nScavenged++;
      } else {
        nInuse++;
      }
    }
  }
  append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));
}
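
// The is_synchronizing() check above is the standard way for a long-running
// JavaThread loop to cooperate with safepoints. A minimal sketch of the
// idiom in isolation (jt is the current JavaThread):
//
//   if (SafepointSynchronize::is_synchronizing()) {
//     // publish any state the VM thread might need to see first ...
//     ThreadBlockInVM tbivm(jt);   // transitions to _thread_blocked and back,
//   }                              // parking here until the safepoint ends
//
// Scavenged-but-unpublished monitors must be handed off before blocking,
// since the safepoint itself is what makes them reusable.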

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return to the global free list -
    // plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");
    assert(mid->is_free(), "invariant");

    // Move the object to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
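
// deflate_monitor_list() above uses the classic unlink-while-walking shape
// for a singly-linked list. Reduced to its skeleton:
//
//   for (mid = *listHeadp; mid != NULL;) {
//     if (should_remove(mid)) {
//       if (mid == *listHeadp)           *listHeadp = mid->FreeNext;
//       else if (cur_mid_in_use != NULL) cur_mid_in_use->FreeNext = mid->FreeNext;
//       ObjectMonitor* next = mid->FreeNext;
//       mid->FreeNext = NULL;            // mid now tails the scavenged list
//       mid = next;
//     } else {
//       cur_mid_in_use = mid;            // trailing pointer
//       mid = mid->FreeNext;
//     }
//   }
//
// (should_remove() here stands in for the deflate_monitor() call.)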

void ObjectSynchronizer::deflate_idle_monitors() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  int nInuse = 0;          // currently associated with objects
  int nInCirculation = 0;  // extant
  int nScavenged = 0;      // reclaimed
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtor's during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    int inUse = 0;
    for (JavaThread* cur = Threads::first(); cur != NULL; cur = cur->next()) {
      nInCirculation += cur->omInUseCount;
      int deflated_count = deflate_monitor_list(cur->omInUseList_addr(), &freeHeadp, &freeTailp);
      cur->omInUseCount -= deflated_count;
      if (ObjectMonitor::Knob_VerifyInUse) {
        verifyInUse(cur);
      }
      nScavenged += deflated_count;
      nInuse += cur->omInUseCount;
    }

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      nScavenged += deflated_count;
      nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block =
      (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
    for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should either be on a thread-specific private
          // free list or on the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          nScavenged++;
        } else {
          nInuse++;
        }
      }
    }
  }

  gMonitorFreeCount += nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
                  "ForceMonitorScavenge=%d : pop=%d free=%d",
                  nInCirculation, nInuse, nScavenged, ForceMonitorScavenge,
                  gMonitorPopulation, gMonitorFreeCount);
    tty->flush();
  }

  ForceMonitorScavenge = 0;    // Reset

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);

  OM_PERFDATA_OP(Deflations, inc(nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}
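
// Note that the final splice above is O(1) no matter how many monitors were
// scavenged; this is why every deflation helper threads both freeHeadp and
// freeTailp through its walk:
//
//   freeTailp->FreeNext = gFreeList;  // attach old free list behind segment
//   gFreeList = freeHeadp;            // scavenged segment becomes the head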

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      if (ObjectMonitor::Knob_VerifyMatch != 0) {
        ResourceMark rm;
        Handle obj(THREAD, (oop) mid->object());
        tty->print("INFO: unexpected locked object:");
        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
        fatal("exiting JavaThread=" INTPTR_FORMAT
              " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
              p2i(THREAD), p2i(mid));
      }
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

static void post_monitor_inflate_event(EventJavaMonitorInflate& event,
                                       const oop obj,
                                       const ObjectSynchronizer::InflateCause cause) {
#if INCLUDE_TRACE
  assert(event.should_commit(), "check outside");
  event.set_monitorClass(obj->klass());
  event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
  event.set_cause((u1)cause);
  event.commit();
#endif
}
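
// For reference, the trace-event pattern used by inflate() pairs with the
// helper above as follows (a sketch of the code earlier in this file):
//
//   EventJavaMonitorInflate event;      // scoped event object
//   ...
//   event.cancel();                     // found someone else's monitor
//   ... or, having performed the inflation ourselves ...
//   if (event.should_commit()) {
//     post_monitor_inflate_event(event, object, cause);
//   }
//
// so only the thread that actually performed an inflation emits a record.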

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size so
    // do some cache line specific sanity checks.

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block =
    (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > (ObjectMonitor *)&block[0] &&
        monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif
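
// Note on the containment test above: the lower bound deliberately excludes
// &block[0] (the reserved CHAINMARKER block header), and the modulus assert
// rejects pointers into the interior of a PaddedEnd<ObjectMonitor> slot:
//
//   monitor > (ObjectMonitor *)&block[0] &&
//   monitor < (ObjectMonitor *)&block[_BLOCKSIZE] &&
//   ((address)monitor - (address)block) % sizeof(PaddedEnd<ObjectMonitor>) == 0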