/*
 * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
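
// For orientation (not product code): a typical probe site appears in
// wait() further down in this file:
//   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
// DTRACE_MONITOR_PROBE_COMMON materializes the thread id and klass-name
// bytes that every monitor probe reports.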

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)  {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)        {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
// global monitor in-use list, for moribund threads;
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

static void post_monitor_inflate_event(EventJavaMonitorInflate&,
                                       const oop,
                                       const ObjectSynchronizer::InflateCause);

#define CHECK_THROW_VALUE_TYPE_IMSE(obj)  \
  if ((obj)->klass_is_value_type()) {     \
    ResourceMark rm(THREAD);              \
    THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }

#define CHECK_THROW_VALUE_TYPE_IMSE_0(obj)  \
  if ((obj)->klass_is_value_type()) {       \
    ResourceMark rm(THREAD);                \
    THROW_MSG_0(vmSymbols::java_lang_IllegalMonitorStateException(), obj->klass()->external_name()); \
  }


#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  assert(!obj->klass_is_value_type(), "monitor op on value type");
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.
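
// Sketch of an assumed caller shape (illustrative only, not an actual
// entry point): runtime callers try quick_enter() while still _in_Java
// and only pay for a thread-state transition when it declines:
//
//   if (!ObjectSynchronizer::quick_enter(obj, thread, lock)) {
//     // transition to _thread_in_vm, handlize oops, then:
//     ObjectSynchronizer::fast_enter(h_obj, lock, /*attempt_rebias*/ true, THREAD);
//   }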

bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  assert(!obj->klass_is_value_type(), "monitor op on value type");
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(m->object() == obj, "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint
  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update that code if the
// following function is changed. The implementation is extremely
// sensitive to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  assert(!obj->klass_is_value_type(), "monitor op on value type");
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
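
// Summary of the enter/exit paths above and below (informational
// restatement, not new behavior):
//   enter:  biased mark    -> revoke/rebias via BiasedLocking, may return
//           neutral mark   -> CAS a stack-lock into the header (slow_enter)
//           anything else  -> inflate() and ObjectMonitor::enter()
//   exit:   recursive      -> displaced header is NULL, nothing to do
//           stack-locked   -> CAS the displaced header back into the mark
//           anything else  -> inflate() and ObjectMonitor::exit()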

void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  assert(!object->klass_is_value_type(), "monitor op on value type");
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      TEVENT(fast_exit: release stack-lock);
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  CHECK_THROW_VALUE_TYPE_IMSE(obj);
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      TEVENT(slow_enter: release stacklock);
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}
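
// Compact restatement of the stack-lock protocol above (informational;
// "box" stands for the on-stack BasicLock):
//   lock:   box->set_displaced_header(obj->mark());   // save the neutral mark
//           obj->cas_set_mark((markOop) box, mark);   // mark now points at box
//   unlock: obj->cas_set_mark(box->displaced_header(), (markOop) box);
// A NULL displaced header marks a recursive enter; any CAS failure routes
// through inflate().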

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavyweight
// monitor should be OK, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  TEVENT(complete_exit);
  assert(!obj->klass_is_value_type(), "monitor op on value type");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  TEVENT(reenter);
  assert(!obj->klass_is_value_type(), "monitor op on value type");
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}
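
// The five-step protocol above, as a sketch (hypothetical call sequence;
// lock1/lock2 are Handles the caller already holds in order):
//
//   intptr_t rec = ObjectSynchronizer::complete_exit(lock1, THREAD);  // (1)
//   ObjectSynchronizer::wait(lock2, 0, THREAD);                       // (2)
//   ... unlock lock2 when notified ...                                // (3)
//   ObjectSynchronizer::reenter(lock1, rec, THREAD);                  // (4)
//   ... lock lock2 again ...                                          // (5)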
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  TEVENT(jni_enter);
  CHECK_THROW_VALUE_TYPE_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  TEVENT(jni_exit);
  CHECK_THROW_VALUE_TYPE_IMSE(obj);
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    TEVENT(ObjectLocker);

    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}
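
// ObjectLocker is a scoped guard; a typical VM-internal use looks like
// (illustrative only, "h_lock_obj" is a hypothetical Handle):
//
//   {
//     ObjectLocker ol(h_lock_obj, THREAD, true);  // fast_enter in ctor
//     ... critical section over the Java-level lock object ...
//   }                                             // fast_exit in dtor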

// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  CHECK_THROW_VALUE_TYPE_IMSE_0(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  CHECK_THROW_VALUE_TYPE_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    TEVENT(wait - throw IAX);
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  CHECK_THROW_VALUE_TYPE_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see the comment in notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  CHECK_THROW_VALUE_TYPE_IMSE(obj);
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}
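
// For context, these runtime entry points back the familiar Java-level
// idiom (ordinary user code, shown for orientation):
//
//   synchronized (obj) {              // fast_enter / slow_enter
//     while (!condition) obj.wait();  // ObjectSynchronizer::wait
//     ...
//     obj.notifyAll();                // ObjectSynchronizer::notifyall
//   }                                 // fast_exit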

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock.
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
        TEVENT(Inflate: INFLATING - yield);
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflation lock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: naked_yield() is advisory and has almost no effect on some
          // platforms, so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
        TEVENT(Inflate: INFLATING - yield/park);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses the global Park-Miller RNG.
    // On an MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  TEVENT(hashCode: GENERATE);
  return value;
}
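
// Identity hashes are sticky: once installed in the header (or in the
// displaced header of an inflated monitor) the same value is returned for
// the lifetime of the object. Java-level view (for orientation only):
//
//   Object o = new Object();
//   int h1 = System.identityHashCode(o);
//   int h2 = System.identityHashCode(o);   // always equals h1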

intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (EnableValhalla && obj->klass_is_value_type()) {
    // Tooling is expected to override hashCode for value types; just don't crash.
    if (log_is_enabled(Debug, monitorinflation)) {
      ResourceMark rm;
      log_debug(monitorinflation)("FastHashCode for value type: %s", obj->klass()->external_name());
    }
    return 0;
  }
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable. It can NOT be
    // changed in ANY case. So we have to inflate the header into
    // a heavyweight monitor even if the current thread owns the
    // lock. The reason is that the BasicLock (stack slot) will be
    // asynchronously read by other threads during the inflate()
    // call. Any change to the stack may not propagate to other
    // threads correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into the header
    assert(temp->is_neutral(), "invariant");
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is to install the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}
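
// A typical call shape (sketch; callers elsewhere in the runtime funnel
// identity-hash requests here via a current JavaThread):
//
//   intptr_t h = ObjectSynchronizer::FastHashCode(Thread::current(), obj);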

bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot assert here since this object may have been locked by
  // another thread by the time we reach this point.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}

// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  if (MonitorInUseLists) {
    // When using thread-local monitor lists, we only scan the
    // global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  } else {
    global_oops_do(f);
  }
}
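
// Illustrative sketch (not product code): a MonitorClosure that tallies
// in-use monitors via monitors_iterate() above.
//
//   class CountingClosure : public MonitorClosure {
//    public:
//     int _count;
//     CountingClosure() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) { _count++; }
//   };
//   CountingClosure cc;
//   ObjectSynchronizer::monitors_iterate(&cc);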

void ObjectSynchronizer::global_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  for (; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// -- unassigned and on the global free list
// -- unassigned and on a thread's private omFreeList
// -- assigned to an object. The object is inflated and the mark refers
//    to the ObjectMonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
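
// Operational note (for instance, and assuming the flag is available in
// this build): running with -XX:MonitorBound=<N> caps the number of
// in-use monitors at roughly N by inducing the scavenge below once
// (population - free) exceeds the bound.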

static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce a STW safepoint to trim monitors.
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg(1, &ForceMonitorScavenge) == 0) {
    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - Induced STW @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
    // Induce a 'null' safepoint to scavenge monitors.
    // The VM_Operation instance must be heap allocated as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record. The VMThread will delete the op
    // when it has completed.
    VMThread::execute(new VM_ScavengeMonitors());

    if (ObjectMonitor::Knob_Verbose) {
      tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
                    Whence, ForceMonitorScavenge);
      tty->flush();
    }
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
      TEVENT(omFirst - reprovision);

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // ObjectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first ObjectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store(&gBlockList, temp);

    // Add the new string of ObjectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
    TEVENT(Allocate block of monitors);
  }
}
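
// Resulting block layout (informational restatement of the code above):
//
//   temp[0]                -- reserved block header: object() == CHAINMARKER,
//                             FreeNext links to the previous gBlockList head
//   temp[1.._BLOCKSIZE-2]  -- FreeNext points at the following element
//   temp[_BLOCKSIZE-1]     -- tail; FreeNext spliced onto the old gFreeList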

// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        if (ObjectMonitor::Knob_VerifyInUse) {
          verifyInUse(Self);
        }
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before_ the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the scavenge operator. In particular, this ensures that
// the thread's monitors are scanned by a GC safepoint, either via
// Thread::oops_do() (if a safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL).
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock.
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
      TEVENT(omFlush - Move one);
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock.
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
    assert(Self->omFreeCount == tally, "free-count off");
    Self->omFreeCount = 0;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
  TEVENT(omFlush);
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}
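
// A typical inflation call site, as used by slow_enter() above (shown for
// orientation):
//
//   ObjectSynchronizer::inflate(THREAD,
//                               obj(),
//                               inflate_cause_monitor_enter)->enter(THREAD);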
// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {

  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  if (EnableValhalla) {
    guarantee(!object->klass_is_value_type(), "Attempt to inflate value type");
  }

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object
    // *  BIASED       - illegal.  We should never see this.

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      TEVENT(Inflate: spin while INFLATING);
      ReadStableMark(object);
      continue;
    }
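    // A simplified model of the wait above (illustrative only; the real
    // ReadStableMark() also spins, yields, and parks rather than purely
    // polling):
    //
    //   while (object->mark() == markOopDesc::INFLATING()) {
    //     SpinPause();   // wait for the inflating thread to publish the monitor
    //   }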
    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word.  We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark.  This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval.  A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS.
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate.  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in-progress.  This protocol avoids races that
      // would otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0, mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up the monitor fields to proper values -- prepare the monitor.
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL...ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering.  The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      TEVENT(Inflate: overwrite stacklock);
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(event, object, cause);
      }
      return m;
    }
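    // To summarize the stack-locked case as a timeline (illustrative only;
    // thread A inflates an object stack-locked by thread B):
    //
    //   A: mark: B's BasicLock*  --CAS-->  INFLATING (0)
    //   A: dmw = mark->displaced_mark_helper()    // stable while mark is 0
    //   A: m->set_header(dmw); m->set_owner(mark->locker()); m->set_object(object)
    //   A: mark: INFLATING (0)  --release-store-->  markOopDesc::encode(m)
    //   B: any unlock attempt while the mark is 0 fails its CAS and spins in
    //      slow_exit()/inflate() until the monitor address is published.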
    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership on the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    TEVENT(Inflate: overwrite neutral);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(event, object, cause);
    }
    return m;
  }
}
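// A hypothetical inflateTry() along the lines of the TODO-FIXME in the
// neutral case above -- swing a pre-owned monitor into a neutral header so a
// single CAS both inflates the object and confers ownership.  Sketch only;
// the names mirror the surrounding code, but no such method is implemented:
//
//   ObjectMonitor* m = omAlloc(Self);
//   m->Recycle();
//   m->set_header(mark);                    // mark must be neutral
//   m->set_owner(Self);                     // pre-locked for the caller
//   m->set_object(object);
//   if (object->cas_set_mark(markOopDesc::encode(m), mark) == mark) {
//     return m;                             // inflated *and* owned in one CAS
//   }
//   // interference: scrub m, omRelease(Self, m, true), and retry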
// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in-circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to a global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() acquires the global
// list lock to scan gOmInUseList and move non-busy monitors to the
// global free list.
// An alternative could have used a single global in-use list.  The
// downside would have been the additional cost of acquiring the global list lock
// for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};

// Deflate a single monitor if not in-use.
// Return true if deflated, false if in-use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return to the global free list.
    // Plain old deflation ...
    TEVENT(deflate_idle_monitors - scavenge1);
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the object to the working free list defined by freeHeadp, freeTailp
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}
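// The freeHeadp/freeTailp pair used above implements a constant-time tail
// append onto the caller's working list of scavenged monitors.  A minimal
// sketch of the effect of appending mid (illustrative only):
//
//   empty working list:  *freeHeadp = mid;  *freeTailp = mid;
//   non-empty list:      (*freeTailp)->FreeNext = mid;  *freeTailp = mid;
//
// mid->FreeNext is left for the caller to terminate -- see
// deflate_monitor_list() below, which NULLs it after unlinking.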
// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// Caller acquires gListLock.
//
// In the case of parallel processing of thread-local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids the conflicts that would arise if worker threads
// processed the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // if deflate_monitor succeeded,
      // extract from per-thread in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}
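// Unlinking a deflated mid from the in-use list, as done above
// (illustrative only):
//
//   before:  *listHeadp -> ... -> cur_mid_in_use -> mid -> next -> ...
//   after:   *listHeadp -> ... -> cur_mid_in_use ---------> next -> ...
//
// or, when mid is the head:
//
//   before:  *listHeadp -> mid -> next -> ...
//   after:   *listHeadp -> next -> ...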
void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->nInuse = 0;          // currently associated with objects
  counters->nInCirculation = 0;  // extant
  counters->nScavenged = 0;      // reclaimed
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  TEVENT(deflate_idle_monitors);
  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    // Note: the thread-local monitor lists get deflated in
    // a separate pass.  See deflate_thread_local_monitors().

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      counters->nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      counters->nScavenged += deflated_count;
      counters->nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
    for (; block != NULL; block = next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      counters->nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should either be on a thread-specific private
          // free list or on the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          counters->nScavenged++;
        } else {
          counters->nInuse++;
        }
      }
    }
  }

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  gMonitorFreeCount += counters->nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and list agree.

  if (ObjectMonitor::Knob_Verbose) {
    tty->print_cr("INFO: Deflate: InCirc=%d InUse=%d Scavenged=%d "
                  "ForceMonitorScavenge=%d : pop=%d free=%d",
                  counters->nInCirculation, counters->nInuse, counters->nScavenged, ForceMonitorScavenge,
                  gMonitorPopulation, gMonitorFreeCount);
    tty->flush();
  }

  ForceMonitorScavenge = 0;  // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (!MonitorInUseLists) return;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  Thread::muxAcquire(&gListLock, "scavenge - return");

  // Adjust counters
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
  if (ObjectMonitor::Knob_VerifyInUse) {
    verifyInUse(thread);
  }
  counters->nScavenged += deflated_count;
  counters->nInuse += thread->omInUseCount;

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");

    // constant-time list splice - prepend scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
}
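// Taken together, the deflation entry points above are driven from safepoint
// cleanup roughly as follows (a sketch; see
// SafepointSynchronize::do_cleanup_tasks() and ParallelSPCleanupTask in
// safepoint.cpp for the actual driver):
//
//   DeflateMonitorCounters counters;
//   ObjectSynchronizer::prepare_deflate_idle_monitors(&counters);
//   ObjectSynchronizer::deflate_idle_monitors(&counters);   // global lists
//   // per-thread in-use lists, one call per JavaThread,
//   // possibly performed by parallel worker threads:
//   ObjectSynchronizer::deflate_thread_local_monitors(thread, &counters);
//   ObjectSynchronizer::finish_deflate_idle_monitors(&counters);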
// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      if (ObjectMonitor::Knob_VerifyMatch != 0) {
        ResourceMark rm;
        Handle obj(THREAD, (oop) mid->object());
        tty->print("INFO: unexpected locked object:");
        javaVFrame::print_locked_object_class_name(tty, obj, "locked");
        fatal("exiting JavaThread=" INTPTR_FORMAT
              " unexpectedly owns ObjectMonitor=" INTPTR_FORMAT,
              p2i(THREAD), p2i(mid));
      }
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}
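// A sketch of the flag-based optimization suggested above (hypothetical;
// no used_jni_monitors() accessor exists in this code base):
//
//   // in JNI MonitorEnter:   thread->set_used_jni_monitors(true);
//   // in the function above: if (!THREAD->used_jni_monitors()) return;
//
// This would skip the extant-monitor scan entirely for the common case of
// threads that never entered a monitor through JNI.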
const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

static void post_monitor_inflate_event(EventJavaMonitorInflate& event,
                                       const oop obj,
                                       const ObjectSynchronizer::InflateCause cause) {
#if INCLUDE_TRACE
  assert(event.should_commit(), "check outside");
  event.set_monitorClass(obj->klass());
  event.set_address((TYPE_ADDRESS)(uintptr_t)(void*)obj);
  event.set_cause((u1)cause);
  event.commit();
#endif
}

//------------------------------------------------------------------------------
// Debugging code

void ObjectSynchronizer::sanity_checks(const bool verbose,
                                       const uint cache_line_size,
                                       int *error_cnt_ptr,
                                       int *warning_cnt_ptr) {
  u_char *addr_begin      = (u_char*)&GVars;
  u_char *addr_stwRandom  = (u_char*)&GVars.stwRandom;
  u_char *addr_hcSequence = (u_char*)&GVars.hcSequence;

  if (verbose) {
    tty->print_cr("INFO: sizeof(SharedGlobals)=" SIZE_FORMAT,
                  sizeof(SharedGlobals));
  }

  uint offset_stwRandom = (uint)(addr_stwRandom - addr_begin);
  if (verbose) tty->print_cr("INFO: offset(stwRandom)=%u", offset_stwRandom);

  uint offset_hcSequence = (uint)(addr_hcSequence - addr_begin);
  if (verbose) {
    tty->print_cr("INFO: offset(hcSequence)=%u", offset_hcSequence);
  }

  if (cache_line_size != 0) {
    // We were able to determine the L1 data cache line size so
    // do some cache-line-specific sanity checks.

    if (offset_stwRandom < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom field is closer "
                    "to the struct beginning than a cache line which permits "
                    "false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((offset_hcSequence - offset_stwRandom) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.stwRandom and "
                    "SharedGlobals.hcSequence fields are closer than a cache "
                    "line which permits false sharing.");
      (*warning_cnt_ptr)++;
    }

    if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
      tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
                    "to the struct end than a cache line which permits false "
                    "sharing.");
      (*warning_cnt_ptr)++;
    }
  }
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif