/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;
// global monitor free list
ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL;
// global monitor in-use list, for moribund threads,
// monitors they inflated need to be scanned for deflation
ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL;
// count of entries in gOmInUseList
int ObjectSynchronizer::gOmInUseCount = 0;

static volatile intptr_t gListLock = 0;      // protects global monitor lists
static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
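// A hedged sketch of the fused primitive suggested above -- purely
// hypothetical: no notifyAndExit() exists in HotSpot, and the shape below
// just restates the two fast paths this file already provides:
//
//   // bool notifyAndExit(oopDesc* obj, Thread* self, bool all) {
//   //   if (!quick_notify(obj, self, all)) return false; // slow-path notify
//   //   // ... then perform the monitorexit fast path (cf. fast_exit()),
//   //   // sharing the mark-word decode done by the notify step.
//   //   return true;
//   // }
//
// The win would come from decoding obj->mark() once for both operations.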
bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markOop mark = obj->mark();

  if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark->has_monitor()) {
    ObjectMonitor * const mon = mark->monitor();
    assert(oopDesc::equals((oop) mon->object(), obj), "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int tally = 0;
      do {
        mon->INotify(self);
        ++tally;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(tally));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.
bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Self->is_Java_thread(), "invariant");
  assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markOop mark = obj->mark();

  if (mark->has_monitor()) {
    ObjectMonitor * const m = mark->monitor();
    assert(oopDesc::equals((oop) m->object(), obj), "invariant");
    Thread * const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == Self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markOopDesc::unused_mark());

    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
      assert(m->_recursions == 0, "invariant");
      assert(m->_owner == Self, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Fast Monitor Enter/Exit
// This is the fast monitor enter. The interpreter and compiler use
// assembly copies of this code. Make sure to update those pieces of code
// if this function is changed. The implementation is extremely sensitive
// to race conditions. Be careful.

void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock,
                                    bool attempt_rebias, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
      if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
        return;
      }
    } else {
      assert(!attempt_rebias, "can not rebias toward VM thread");
      BiasedLocking::revoke_at_safepoint(obj);
    }
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  slow_enter(obj, lock, THREAD);
}
void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) {
  markOop mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markOopDesc::INFLATING() ||
         !mark->has_bias_pattern(), "should not see bias pattern here");

  markOop dhw = lock->displaced_header();
  if (dhw == NULL) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markOopDesc::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark->is_neutral(), "invariant");
      assert(!mark->has_locker() ||
             THREAD->is_lock_owned((address)mark->locker()), "invariant");
      if (mark->has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor * m = mark->monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == (markOop) lock) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw->is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectSynchronizer::inflate(THREAD,
                              object,
                              inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Interpreter/Compiler Slow Case
// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code.
void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
  markOop mark = obj->mark();
  assert(!mark->has_bias_pattern(), "should not see bias pattern here");

  if (mark->is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark((markOop) lock, mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark->has_locker() &&
             THREAD->is_lock_owned((address)mark->locker())) {
    assert(lock != mark->locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
    lock->set_displaced_header(NULL);
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markOopDesc::unused_mark());
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_monitor_enter)->enter(THREAD);
}

// This routine is used to handle the interpreter/compiler slow case.
// We don't need to use the fast path here, because it must have
// failed in the interpreter/compiler code. Simply using the heavy-
// weight monitor should be ok, unless someone finds otherwise.
void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
  fast_exit(object, lock, THREAD);
}
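// For orientation, a minimal sketch (comments only) of the mark-word
// transitions the enter/exit paths above implement. The states and
// transitions are taken from the surrounding code; the layout is
// illustrative, not normative:
//
//   neutral mark          --enter-->  stack-locked:
//     lock->set_displaced_header(mark);          // save old header
//     obj->cas_set_mark((markOop) lock, mark);   // mark now points at BasicLock
//
//   stack-locked by self  --exit-->   neutral:
//     dhw = lock->displaced_header();            // recover saved header
//     obj->cas_set_mark(dhw, mark);              // swing it back
//
// Either CAS can fail due to contention or concurrent inflation, in which
// case control falls through to inflate() and the ObjectMonitor slow path.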
// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_vm_internal);

  monitor->reenter(recursion, THREAD);
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}
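// For context, the native-code idiom that funnels into jni_enter()/jni_exit().
// This is standard JNI usage, not HotSpot-internal code (the variable names
// are illustrative):
//
//   // C++ JNI client code:
//   //   jobject lockObj = ...;
//   //   if (env->MonitorEnter(lockObj) != JNI_OK) { /* handle error */ }
//   //   ... critical section; this code may also leave an exception
//   //       pending when it reaches the exit ...
//   //   env->MonitorExit(lockObj);
//
// jni_exit() below must run even with a pending exception, which is why it
// uses monitor->check(THREAD) rather than the CHECK macro.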
// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj,
                                                       inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. Note: can't use
  // monitor->check(CHECK); must exit even if an exception is pending.
  if (monitor->check(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) {
  _dolock = doLock;
  _thread = thread;
  debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);)
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
                                                       obj(),
                                                       inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(obj, false, THREAD);
    assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  markOop mark = obj->mark();
  if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) {
    return;
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_notify)->notifyAll(THREAD);
}
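// The Java-level idiom served by wait()/notify()/notifyAll() above, shown
// for orientation (standard java.lang.Object usage, not HotSpot code):
//
//   synchronized (someobj) {            // monitorenter -> fast/slow_enter
//     while (!condition) {
//       someobj.wait();                 // -> ObjectSynchronizer::wait()
//     }
//     ...
//   }                                   // monitorexit -> fast_exit
//
// and on the producing side:
//
//   synchronized (someobj) { condition = true; someobj.notifyAll(); }
//
// Note that a notify()/notifyAll() on a merely stack-locked object is a
// no-op (see quick_notify(): the implied waitset is empty), while wait()
// always inflates because the waitset lives in the ObjectMonitor.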
// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[DEFAULT_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stwRandom;
  volatile int stwCycle;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hcSequence;
  DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending

static markOop ReadStableMark(oop obj) {
  markOop mark = obj->mark();
  if (!mark->is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markOop mark = obj->mark();
    if (!mark->is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of ReadStableMark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park().
        // When inflation was complete, the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markOopDesc::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call Self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stwRandom}
// * CRC32 of {obj,stwRandom} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stwRandom) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread * Self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hcSequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = Self->_hashStateX;
    t ^= (t << 11);
    Self->_hashStateX = Self->_hashStateY;
    Self->_hashStateY = Self->_hashStateZ;
    Self->_hashStateZ = Self->_hashStateW;
    unsigned v = Self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    Self->_hashStateW = v;
    value = v;
  }

  value &= markOopDesc::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markOopDesc::no_hash, "invariant");
  return value;
}
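// A self-contained sketch of the xor-shift generator used above, useful for
// studying the distribution outside the VM. The seed values are the classic
// ones from Marsaglia's paper and are illustrative; in HotSpot the four
// state words live on the Thread and are seeded at thread-creation time:
//
//   struct XorShift128 {
//     unsigned x = 123456789, y = 362436069, z = 521288629, w = 88675123;
//     unsigned next() {
//       unsigned t = x;
//       t ^= (t << 11);
//       x = y; y = z; z = w;
//       w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
//       return w;
//     }
//   };
//
// This is Marsaglia's xorshift128 (period 2^128 - 1; the all-zero state is
// unreachable from a non-zero seed). The masking with markOopDesc::hash_mask
// and the 0 -> 0xBAD fixup above exist because a hash of 0 is reserved to
// mean "no hash installed".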
intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark()->has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(Self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
      obj = hobj();
      assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         Self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markOop temp, test;
  intptr_t hash;
  markOop mark = ReadStableMark(obj);

  // object should remain ineligible for biased locking
  assert(!mark->has_bias_pattern(), "invariant");

  if (mark->is_neutral()) {
    hash = mark->hash();              // this is a normal header
    if (hash) {                       // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(Self, obj);  // allocate a new hash code
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    // use (machine word version) atomic operation to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {
      return hash;
    }
    // If the atomic operation failed, we must inflate the header
    // into a heavyweight monitor. We could add more code here
    // for the fast path, but it is not worth the complexity.
  } else if (mark->has_monitor()) {
    monitor = mark->monitor();
    temp = monitor->header();
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();
    if (hash) {
      return hash;
    }
    // Skip to the following code to reduce code size
  } else if (Self->is_lock_owned((address)mark->locker())) {
    temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
    assert(temp->is_neutral(), "invariant");
    hash = temp->hash();              // by current thread, check if the displaced
    if (hash) {                       // header contains hash code
      return hash;
    }
    // WARNING:
    // The displaced header is strictly immutable.
    // It can NOT be changed in ANY case.
    // So we have to inflate the header into a heavyweight monitor
    // even if the current thread owns the lock. The reason is that the
    // BasicLock (stack slot) will be asynchronously read by other
    // threads during the inflate() function. Any change to the stack
    // may not propagate to other threads correctly.
  }

  // Inflate the monitor to set the hash code
  monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
  // Load the displaced header and check whether it has a hash code
  mark = monitor->header();
  assert(mark->is_neutral(), "invariant");
  hash = mark->hash();
  if (hash == 0) {
    hash = get_next_hash(Self, obj);
    temp = mark->copy_set_hash(hash); // merge the hash code into header
    assert(temp->is_neutral(), "invariant");
    test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
    if (test != mark) {
      // The only update to the header in the monitor (outside GC)
      // is installing the hash code. If someone adds a new usage of
      // the displaced header, please update this code.
      hash = test->hash();
      assert(test->is_neutral(), "invariant");
      assert(hash != 0, "Trivial unexpected object/monitor header usage.");
    }
  }
  // We finally get the hash
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}
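// For orientation: Object.hashCode() and System.identityHashCode() reach
// FastHashCode() via the JVM entry point JVM_IHashCode(). Roughly (a
// simplified sketch of the call chain; see jvm.cpp for the real code):
//
//   JVM_IHashCode(env, handle)
//     -> handle == NULL ? 0
//                       : ObjectSynchronizer::FastHashCode(THREAD, resolved_oop)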
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke_and_rebias(h_obj, false, thread);
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    return thread->is_lock_owned((address)mark->locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark->is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke_and_rebias(h_obj, false, self);
    assert(!h_obj->mark()->has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markOop mark = ReadStableMark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark->has_locker()) {
    return self->is_lock_owned((address)mark->locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an objectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark->has_monitor()) {
    void * owner = mark->monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark->is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current());
    }
    assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markOop mark = ReadStableMark(obj);

  // Uncontended case, header points to stack
  if (mark->has_locker()) {
    owner = (address) mark->locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark->has_monitor()) {
    ObjectMonitor* monitor = mark->monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark->is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        closure->do_monitor(mid);
      }
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
}
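// A minimal sketch of a monitors_iterate() client. MonitorClosure is the
// existing interface from runtime/synchronizer.hpp; the counting closure
// below is illustrative only:
//
//   class CountingMonitorClosure : public MonitorClosure {
//     int _count;
//    public:
//     CountingMonitorClosure() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) { _count++; }
//     int count() const { return _count; }
//   };
//
//   // CountingMonitorClosure cmc;
//   // ObjectSynchronizer::monitors_iterate(&cmc);  // visits in-use monitors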
// Get the next block in the block list.
static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) {
  assert(block->object() == CHAINMARKER, "must be a block header");
  block = (PaddedEnd<ObjectMonitor>*) block->FreeNext;
  assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
  return block;
}

static bool monitors_used_above_threshold() {
  if (gMonitorPopulation == 0) {
    return false;
  }
  int monitors_used = gMonitorPopulation - gMonitorFreeCount;
  int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation;
  return monitor_usage > MonitorUsedDeflationThreshold;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  if (MonitorUsedDeflationThreshold > 0) {
    return monitors_used_above_threshold();
  }
  return false;
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  if (MonitorInUseLists) {
    // When using thread local monitor lists, we only scan the
    // global used list here (for moribund threads), and
    // the thread-local monitors in Thread::oops_do().
    global_used_oops_do(f);
  } else {
    global_oops_do(f);
  }
}

void ObjectSynchronizer::global_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  for (; block != NULL; block = next(block)) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = 1; i < _BLOCKSIZE; i++) {
      ObjectMonitor* mid = (ObjectMonitor *)&block[i];
      if (mid->object() != NULL) {
        f->do_oop((oop*)mid->object_addr());
      }
    }
  }
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(gOmInUseList, f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->omInUseList, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  ObjectMonitor* mid;
  for (mid = list; mid != NULL; mid = mid->FreeNext) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from the global gFreeList and
// associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects. Such
// scavenged monitors are returned to the gFreeList.
//
// The global list is protected by gListLock. All the critical sections
// are short and operate in constant-time.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the global free list
// --   unassigned and on a thread's private omFreeList
// --   assigned to an object. The object is inflated and the mark refers
//      to the objectmonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
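// A worked example of the monitors_used_above_threshold() arithmetic above
// (the concrete numbers are illustrative):
//
//   gMonitorPopulation = 10000   // extant monitors
//   gMonitorFreeCount  =   500   // on gFreeList
//   monitors_used      =  9500
//   monitor_usage      = (9500 * 100) / 10000 = 95
//
// With MonitorUsedDeflationThreshold at, say, 90, a usage of 95% reports
// that cleanup (a deflation pass at the next safepoint) is needed. The 100LL
// widening in the code avoids 32-bit overflow of monitors_used * 100.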
static void InduceScavenge(Thread * Self, const char * Whence) {
  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger an asynchronous STW safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) {
    // Induce a 'null' safepoint to scavenge monitors
    // The VM_Operation instance must be heap allocated, as the op will be
    // enqueued and posted to the VMThread and has a lifespan longer than
    // that of this activation record. The VMThread will delete the op when
    // completed.
    VMThread::execute(new VM_ScavengeMonitors());
  }
}

void ObjectSynchronizer::verifyInUse(Thread *Self) {
  ObjectMonitor* mid;
  int in_use_tally = 0;
  for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
    in_use_tally++;
  }
  assert(in_use_tally == Self->omInUseCount, "in-use count off");

  int free_tally = 0;
  for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
    free_tally++;
  }
  assert(free_tally == Self->omFreeCount, "free count off");
}

ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of objectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  for (;;) {
    ObjectMonitor * m;

    // 1: try to allocate from the thread's local omFreeList.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the thread
    // attempt to instantiate new monitors. Thread-local free lists take
    // heat off the gListLock and improve allocation latency, as well as reducing
    // coherency traffic on the shared global list.
    m = Self->omFreeList;
    if (m != NULL) {
      Self->omFreeList = m->FreeNext;
      Self->omFreeCount--;
      // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
      guarantee(m->object() == NULL, "invariant");
      if (MonitorInUseLists) {
        m->FreeNext = Self->omInUseList;
        Self->omInUseList = m;
        Self->omInUseCount++;
      } else {
        m->FreeNext = NULL;
      }
      return m;
    }

    // 2: try to allocate from the global gFreeList
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (gFreeList != NULL) {
      // Reprovision the thread's omFreeList.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      Thread::muxAcquire(&gListLock, "omAlloc");
      for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
        gMonitorFreeCount--;
        ObjectMonitor * take = gFreeList;
        gFreeList = take->FreeNext;
        guarantee(take->object() == NULL, "invariant");
        guarantee(!take->is_busy(), "invariant");
        take->Recycle();
        omRelease(Self, take, false);
      }
      Thread::muxRelease(&gListLock);
      Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
      if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;

      const int mx = MonitorBound;
      if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
        // We can't safely induce a STW safepoint from omAlloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(Self, "omAlloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation objectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE;
    PaddedEnd<ObjectMonitor> * temp;
    size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size,
                                                      mtInternal);
    temp = (PaddedEnd<ObjectMonitor> *)
             align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE);

    // NOTE: (almost) no way to recover if allocation failed.
    // We might be able to induce a STW safepoint and scavenge enough
    // objectMonitors to permit progress.
    if (temp == NULL) {
      vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
                            "Allocate ObjectMonitors");
    }
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list, while the very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as gBlockList
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
    // (sketched after this function).

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].FreeNext = NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    // Acquire the gListLock to manipulate gBlockList and gFreeList.
    // An Oyama-Taura-Yonezawa scheme might be more efficient.
    Thread::muxAcquire(&gListLock, "omAlloc [2]");
    gMonitorPopulation += _BLOCKSIZE-1;
    gMonitorFreeCount += _BLOCKSIZE-1;

    // Add the new block to the list of extant blocks (gBlockList).
    // The very first objectMonitor in a block is reserved and dedicated.
    // It serves as blocklist "next" linkage.
    temp[0].FreeNext = gBlockList;
    // There are lock-free uses of gBlockList so make sure that
    // the previous stores happen before we update gBlockList.
    OrderAccess::release_store(&gBlockList, temp);

    // Add the new string of objectMonitors to the global free list
    temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
    gFreeList = temp + 1;
    Thread::muxRelease(&gListLock);
  }
}
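// A sketch of the alternative block layout suggested in the comment above
// ("class Block { ... }"). Illustrative only -- this is not how the pool is
// implemented today, which instead reserves element [0] of each block for
// linkage:
//
//   struct Block {
//     Block*        _next;                    // gBlockList linkage
//     ObjectMonitor _body[_BLOCKSIZE - 1];    // usable monitors
//   };
//
// Separating the linkage from the monitor array would remove the CHAINMARKER
// convention and the "i = 1" special case in the formatting loop above, at
// the cost of reworking the block-walking code in monitors_iterate() and
// next().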
// Place "m" on the caller's private per-thread omFreeList.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's omFreeList as the only time we'll call
// omRelease is to return a monitor to the free list after a CAS
// attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_idle_monitors -- from reclaiming them.

void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
                                   bool fromPerThreadAlloc) {
  guarantee(m->object() == NULL, "invariant");
  guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
  // Remove from omInUseList
  if (MonitorInUseLists && fromPerThreadAlloc) {
    ObjectMonitor* cur_mid_in_use = NULL;
    bool extracted = false;
    for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
      if (m == mid) {
        // extract from per-thread in-use list
        if (mid == Self->omInUseList) {
          Self->omInUseList = mid->FreeNext;
        } else if (cur_mid_in_use != NULL) {
          cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
        }
        extracted = true;
        Self->omInUseCount--;
        break;
      }
    }
    assert(extracted, "Should have extracted from in-use list");
  }

  // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
  m->FreeNext = Self->omFreeList;
  Self->omFreeList = m;
  Self->omFreeCount++;
}

// Return the monitors of a moribund thread's local free list to
// the global free list. Typically a thread calls omFlush() when
// it's dying. We could also consider having the VM thread steal
// monitors from threads that have not run java code over a few
// consecutive STW safepoints. Relatedly, we might decay
// omFreeProvision at STW safepoints.
//
// Also return the monitors of a moribund thread's omInUseList to
// a global gOmInUseList under the global list lock so these
// will continue to be scanned.
//
// We currently call omFlush() from Threads::remove() _before_ the thread
// has been excised from the thread list and is no longer a mutator.
// This means that omFlush() cannot run concurrently with a safepoint and
// interleave with the scavenge operator.
// In particular, this ensures that
// the thread's monitors are scanned by a GC safepoint, either via
// Thread::oops_do() (if safepoint happens before omFlush()) or via
// ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's
// monitors have been transferred to the global in-use list).

void ObjectSynchronizer::omFlush(Thread * Self) {
  ObjectMonitor * list = Self->omFreeList;  // Null-terminated SLL
  Self->omFreeList = NULL;
  ObjectMonitor * tail = NULL;
  int tally = 0;
  if (list != NULL) {
    ObjectMonitor * s;
    // The thread is going away, the per-thread free monitors
    // are freed via set_owner(NULL)
    // Link them to tail, which will be linked into the global free list
    // gFreeList below, under the gListLock
    for (s = list; s != NULL; s = s->FreeNext) {
      tally++;
      tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "invariant");
      s->set_owner(NULL);   // redundant but good hygiene
    }
    guarantee(tail != NULL && list != NULL, "invariant");
  }

  ObjectMonitor * inUseList = Self->omInUseList;
  ObjectMonitor * inUseTail = NULL;
  int inUseTally = 0;
  if (inUseList != NULL) {
    Self->omInUseList = NULL;
    ObjectMonitor *cur_om;
    // The thread is going away, however the omInUseList inflated
    // monitors may still be in-use by other threads.
    // Link them to inUseTail, which will be linked into the global in-use list
    // gOmInUseList below, under the gListLock
    for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) {
      inUseTail = cur_om;
      inUseTally++;
    }
    assert(Self->omInUseCount == inUseTally, "in-use count off");
    Self->omInUseCount = 0;
    guarantee(inUseTail != NULL && inUseList != NULL, "invariant");
  }

  Thread::muxAcquire(&gListLock, "omFlush");
  if (tail != NULL) {
    tail->FreeNext = gFreeList;
    gFreeList = list;
    gMonitorFreeCount += tally;
    assert(Self->omFreeCount == tally, "free-count off");
    Self->omFreeCount = 0;
  }

  if (inUseTail != NULL) {
    inUseTail->FreeNext = gOmInUseList;
    gOmInUseList = inUseList;
    gOmInUseCount += inUseTally;
  }

  Thread::muxRelease(&gListLock);
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
  markOop mark = obj->mark();
  if (mark->has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
    assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");
    return mark->monitor();
  }
  return ObjectSynchronizer::inflate(Thread::current(),
                                     obj,
                                     inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
                                           oop object,
                                           const InflateCause cause) {

  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markOop mark = object->mark();
    assert(!mark->has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal. We should never see this

    // CASE: inflated
    if (mark->has_monitor()) {
      ObjectMonitor * inf = mark->monitor();
      assert(inf->header()->is_neutral(), "invariant");
      assert(oopDesc::equals((oop) inf->object(), object), "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markOopDesc::INFLATING()) {
      ReadStableMark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the omAlloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in omAlloc().

    if (mark->has_locker()) {
      ObjectMonitor * m = omAlloc(Self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible  = NULL;
      m->_recursions   = 0;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark);
      if (cmp != mark) {
        omRelease(Self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack basiclock back into the object header.  Recall also that the
      // header value (hashcode, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an objectMonitor.  The inflate() routine must copy the header
      // value from the basiclock on the owner's stack to the objectMonitor, all
      // the while preserving the hashCode stability invariants.  If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate().  The owner
      // will then spin, waiting for the 0 value to disappear.  Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the basiclock to the object)
      // while inflation is in progress.  This protocol avoids races that might
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0, mark->displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // Fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markOop dmw = mark->displaced_mark_helper();
      assert(dmw->is_neutral(), "invariant");

      // Set up the monitor fields to proper values -- prepare the monitor.
      m->set_header(dmw);

      // Optimization: if the mark->locker stack address is associated
      // with this thread we could simply set m->_owner = Self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner(mark->locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering.  The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markOopDesc::INFLATING(), "invariant");
      object->release_set_mark(markOopDesc::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Debug, monitorinflation)) {
        if (object->is_instance()) {
          ResourceMark rm;
          log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                      p2i(object), p2i(object->mark()),
                                      object->klass()->external_name());
        }
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }
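
    // To summarize the stack-locked path above, the protocol is, as an
    // illustrative sketch only (the real code above also handles the
    // failed-CAS retry and monitor release):
    //
    //   m = omAlloc(Self);                               // speculative allocation
    //   if (!CAS(&obj->mark, stack_lock_mark, INFLATING)) retry;
    //   m->set_header(displaced mark from owner's stack);
    //   m->set_owner(stack locker);
    //   m->set_object(obj);
    //   release_store(&obj->mark, encode(m));            // publish; INFLATING ends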

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked objectMonitor pointer into the object header.  A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to Self.
    // An inflateTry() method that we could call from fast_enter() and slow_enter()
    // would be useful.

    assert(mark->is_neutral(), "invariant");
    ObjectMonitor * m = omAlloc(Self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_owner(NULL);
    m->set_object(object);
    m->_recursions   = 0;
    m->_Responsible  = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) {
      m->set_object(NULL);
      m->set_owner(NULL);
      m->Recycle();
      omRelease(Self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Debug, monitorinflation)) {
      if (object->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                                    p2i(object), p2i(object->mark()),
                                    object->klass()->external_name());
      }
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}


// Deflate_idle_monitors() is called at all safepoints, immediately
// after all mutators are stopped, but before any objects have moved.
// It traverses the list of known monitors, deflating where possible.
// The scavenged monitors are returned to the monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point.
// Having a large number of monitors in circulation negatively
// impacts the performance of some applications (e.g., PointBase).
// Broadly, we want to minimize the # of monitors in circulation.
//
// We have added a flag, MonitorInUseLists, which creates a list
// of active monitors for each thread.  deflate_idle_monitors()
// then only scans the per-thread in-use lists.  omAlloc() puts all
// assigned monitors on the per-thread list.  deflate_idle_monitors()
// returns the non-busy monitors to the global free list.
// When a thread dies, omFlush() adds the list of active monitors for
// that thread to the global gOmInUseList, acquiring the
// global list lock.  deflate_idle_monitors() acquires the global
// list lock to scan it and move non-busy monitors to the global free list.
// An alternative could have been a single global in-use list; the
// downside would have been the additional cost of acquiring the global
// list lock for every omAlloc().
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate.  Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of objectmonitors in circulation.
// This is an unfortunate aspect of this design.
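
// As an illustrative sketch (not product code), the per-safepoint flow with
// MonitorInUseLists enabled is approximately:
//
//   prepare_deflate_idle_monitors(&counters);
//   for each JavaThread t:                           // see safepoint.cpp
//     deflate_thread_local_monitors(t, &counters);
//   deflate_idle_monitors(&counters);                // global gOmInUseList
//   finish_deflate_idle_monitors(&counters);         // publish counters, etc.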

enum ManifestConstants {
  ClearResponsibleAtSTW = 0
};

// Deflate a single monitor if it is not in use.
// Returns true if deflated, false if in use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** freeHeadp,
                                         ObjectMonitor** freeTailp) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
  guarantee(mid == obj->mark()->monitor(), "invariant");
  guarantee(mid->header()->is_neutral(), "invariant");

  if (mid->is_busy()) {
    if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge it and return it to the global free list:
    // plain old deflation ...
    if (log_is_enabled(Debug, monitorinflation)) {
      if (obj->is_instance()) {
        ResourceMark rm;
        log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
                                    "mark " INTPTR_FORMAT " , type %s",
                                    p2i(obj), p2i(obj->mark()),
                                    obj->klass()->external_name());
      }
    }

    // Restore the header back to obj
    obj->release_set_mark(mid->header());
    mid->clear();

    assert(mid->object() == NULL, "invariant");

    // Move the deflated monitor to the working free list defined by
    // freeHeadp and freeTailp.
    if (*freeHeadp == NULL) *freeHeadp = mid;
    if (*freeTailp != NULL) {
      ObjectMonitor * prevtail = *freeTailp;
      assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
      prevtail->FreeNext = mid;
    }
    *freeTailp = mid;
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list and deflate idle monitors.
// The given list could be a per-thread list or a global list.
// The caller acquires gListLock.
//
// In the case of parallel processing of thread-local monitor lists,
// work is done by Threads::parallel_threads_do(), which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids the conflicts that would arise if worker threads
// processed the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
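
// Usage sketch, for illustration only: a caller passes the head of an in-use
// list plus a local free-segment head/tail, then splices the scavenged
// segment onto gFreeList under gListLock (this mirrors what
// deflate_thread_local_monitors() does below):
//
//   ObjectMonitor* freeHeadp = NULL;
//   ObjectMonitor* freeTailp = NULL;
//   int n = deflate_monitor_list(thread->omInUseList_addr(),
//                                &freeHeadp, &freeTailp);
//   // ... later, while holding gListLock:
//   if (freeTailp != NULL) {
//     freeTailp->FreeNext = gFreeList;    // constant-time splice
//     gFreeList = freeHeadp;
//   }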
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
                                             ObjectMonitor** freeHeadp,
                                             ObjectMonitor** freeTailp) {
  ObjectMonitor* mid;
  ObjectMonitor* next;
  ObjectMonitor* cur_mid_in_use = NULL;
  int deflated_count = 0;

  for (mid = *listHeadp; mid != NULL;) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) {
      // deflate_monitor() succeeded, so
      // extract mid from the in-use list
      if (mid == *listHeadp) {
        *listHeadp = mid->FreeNext;
      } else if (cur_mid_in_use != NULL) {
        cur_mid_in_use->FreeNext = mid->FreeNext;  // maintain the in-use list
      }
      next = mid->FreeNext;
      mid->FreeNext = NULL;  // This mid is the current tail in the freeHeadp list
      mid = next;
      deflated_count++;
    } else {
      cur_mid_in_use = mid;
      mid = mid->FreeNext;
    }
  }
  return deflated_count;
}

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->nInuse = 0;          // currently associated with objects
  counters->nInCirculation = 0;  // extant
  counters->nScavenged = 0;      // reclaimed
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  bool deflated = false;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  // Prevent omFlush from changing mids in Thread dtors during deflation,
  // and in case the VM thread is acquiring a lock during a safepoint.
  // See e.g. 6320749.
  Thread::muxAcquire(&gListLock, "scavenge - return");

  if (MonitorInUseLists) {
    // Note: the thread-local monitor lists get deflated in
    // a separate pass.  See deflate_thread_local_monitors().

    // For moribund threads, scan gOmInUseList
    if (gOmInUseList) {
      counters->nInCirculation += gOmInUseCount;
      int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
      gOmInUseCount -= deflated_count;
      counters->nScavenged += deflated_count;
      counters->nInuse += gOmInUseCount;
    }

  } else {
    PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
    for (; block != NULL; block = next(block)) {
      // Iterate over all extant monitors - Scavenge all idle monitors.
      assert(block->object() == CHAINMARKER, "must be a block header");
      counters->nInCirculation += _BLOCKSIZE;
      for (int i = 1; i < _BLOCKSIZE; i++) {
        ObjectMonitor* mid = (ObjectMonitor*)&block[i];
        oop obj = (oop)mid->object();

        if (obj == NULL) {
          // The monitor is not associated with an object.
          // The monitor should be on either a thread-specific private
          // free list or the global free list.
          // obj == NULL IMPLIES mid->is_busy() == 0
          guarantee(!mid->is_busy(), "invariant");
          continue;
        }
        deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);

        if (deflated) {
          mid->FreeNext = NULL;
          counters->nScavenged++;
        } else {
          counters->nInuse++;
        }
      }
    }
  }

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");
    // constant-time list splice - prepend the scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  gMonitorFreeCount += counters->nScavenged;

  // Consider: audit gFreeList to ensure that gMonitorFreeCount and the list agree.

  ForceMonitorScavenge = 0;    // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->nScavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation));

  // TODO: Add objectMonitor leak detection.
  // Audit/inventory the objectMonitors -- make sure they're all accounted for.
  GVars.stwRandom = os::random();
  GVars.stwCycle++;
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  if (!MonitorInUseLists) return;

  ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor * freeTailp = NULL;

  int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);

  Thread::muxAcquire(&gListLock, "scavenge - return");

  // Adjust counters
  counters->nInCirculation += thread->omInUseCount;
  thread->omInUseCount -= deflated_count;
  counters->nScavenged += deflated_count;
  counters->nInuse += thread->omInUseCount;

  // Move the scavenged monitors back to the global free list.
  if (freeHeadp != NULL) {
    guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
    assert(freeTailp->FreeNext == NULL, "invariant");

    // constant-time list splice - prepend the scavenged segment to gFreeList
    freeTailp->FreeNext = gFreeList;
    gFreeList = freeHeadp;
  }
  Thread::muxRelease(&gListLock);
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};
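
// Usage sketch, illustrative only: the closure is meant to be driven by
// monitors_iterate() while gListLock is held, exactly as
// release_monitors_owned_by_thread() does below:
//
//   ReleaseJavaMonitorsClosure rjmc(THREAD);
//   Thread::muxAcquire(&gListLock, "...");
//   ObjectSynchronizer::monitors_iterate(&rjmc);   // visits in-use monitors
//   Thread::muxRelease(&gListLock);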

// Release all inflated monitors owned by THREAD.  Lightweight monitors are
// ignored.  This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight.  All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization would be a per-thread flag that indicates whether
// the thread ever called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread");
  ObjectSynchronizer::monitors_iterate(&rjmc);
  Thread::muxRelease(&gListLock);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() {
  return (u_char*)&GVars.hcSequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() {
  return (u_char*)&GVars.stwRandom;
}

#ifndef PRODUCT

// Check if a monitor belongs to the monitor cache.
// The block list is grow-only, so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
      return 1;
    }
    block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
  }
  return 0;
}

#endif