1 /* 2 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/vmSymbols.hpp" 27 #include "logging/log.hpp" 28 #include "logging/logStream.hpp" 29 #include "jfr/jfrEvents.hpp" 30 #include "memory/allocation.inline.hpp" 31 #include "memory/metaspaceShared.hpp" 32 #include "memory/padded.hpp" 33 #include "memory/resourceArea.hpp" 34 #include "oops/markOop.hpp" 35 #include "oops/oop.inline.hpp" 36 #include "runtime/atomic.hpp" 37 #include "runtime/biasedLocking.hpp" 38 #include "runtime/handles.inline.hpp" 39 #include "runtime/interfaceSupport.inline.hpp" 40 #include "runtime/mutexLocker.hpp" 41 #include "runtime/objectMonitor.hpp" 42 #include "runtime/objectMonitor.inline.hpp" 43 #include "runtime/osThread.hpp" 44 #include "runtime/safepointVerifiers.hpp" 45 #include "runtime/sharedRuntime.hpp" 46 #include "runtime/stubRoutines.hpp" 47 #include "runtime/synchronizer.hpp" 48 #include "runtime/thread.inline.hpp" 49 #include "runtime/timer.hpp" 50 #include "runtime/vframe.hpp" 51 #include "runtime/vmThread.hpp" 52 #include "utilities/align.hpp" 53 #include "utilities/dtrace.hpp" 54 #include "utilities/events.hpp" 55 #include "utilities/preserveException.hpp" 56 57 // The "core" versions of monitor enter and exit reside in this file. 58 // The interpreter and compilers contain specialized transliterated 59 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), 60 // for instance. If you make changes here, make sure to modify the 61 // interpreter, and both C1 and C2 fast-path inline locking code emission. 62 // 63 // ----------------------------------------------------------------------------- 64 65 #ifdef DTRACE_ENABLED 66 67 // Only bother with this argument setup if dtrace is available 68 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. 69 70 #define DTRACE_MONITOR_PROBE_COMMON(obj, thread) \ 71 char* bytes = NULL; \ 72 int len = 0; \ 73 jlong jtid = SharedRuntime::get_java_tid(thread); \ 74 Symbol* klassname = ((oop)(obj))->klass()->name(); \ 75 if (klassname != NULL) { \ 76 bytes = (char*)klassname->bytes(); \ 77 len = klassname->utf8_length(); \ 78 } 79 80 #define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) \ 81 { \ 82 if (DTraceMonitorProbes) { \ 83 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ 84 HOTSPOT_MONITOR_WAIT(jtid, \ 85 (uintptr_t)(monitor), bytes, len, (millis)); \ 86 } \ 87 } 88 89 #define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY 90 #define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL 91 #define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED 92 93 #define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread) \ 94 { \ 95 if (DTraceMonitorProbes) { \ 96 DTRACE_MONITOR_PROBE_COMMON(obj, thread); \ 97 HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */ \ 98 (uintptr_t)(monitor), bytes, len); \ 99 } \ 100 } 101 102 #else // ndef DTRACE_ENABLED 103 104 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon) {;} 105 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon) {;} 106 107 #endif // ndef DTRACE_ENABLED 108 109 // This exists only as a workaround of dtrace bug 6254741 110 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) { 111 DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr); 112 return 0; 113 } 114 115 #define NINFLATIONLOCKS 256 116 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS]; 117 118 // global list of blocks of monitors 119 PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL; 120 // global monitor free list 121 ObjectMonitor * volatile ObjectSynchronizer::gFreeList = NULL; 122 // global monitor in-use list, for moribund threads, 123 // monitors they inflated need to be scanned for deflation 124 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList = NULL; 125 // count of entries in gOmInUseList 126 int ObjectSynchronizer::gOmInUseCount = 0; 127 128 static volatile intptr_t gListLock = 0; // protects global monitor lists 129 static volatile int gMonitorFreeCount = 0; // # on gFreeList 130 static volatile int gMonitorPopulation = 0; // # Extant -- in circulation 131 132 #define CHAINMARKER (cast_to_oop<intptr_t>(-1)) 133 134 135 // =====================> Quick functions 136 137 // The quick_* forms are special fast-path variants used to improve 138 // performance. In the simplest case, a "quick_*" implementation could 139 // simply return false, in which case the caller will perform the necessary 140 // state transitions and call the slow-path form. 141 // The fast-path is designed to handle frequently arising cases in an efficient 142 // manner and is just a degenerate "optimistic" variant of the slow-path. 143 // returns true -- to indicate the call was satisfied. 144 // returns false -- to indicate the call needs the services of the slow-path. 145 // A no-loitering ordinance is in effect for code in the quick_* family 146 // operators: safepoints or indefinite blocking (blocking that might span a 147 // safepoint) are forbidden. Generally the thread_state() is _in_Java upon 148 // entry. 149 // 150 // Consider: An interesting optimization is to have the JIT recognize the 151 // following common idiom: 152 // synchronized (someobj) { .... ; notify(); } 153 // That is, we find a notify() or notifyAll() call that immediately precedes 154 // the monitorexit operation. In that case the JIT could fuse the operations 155 // into a single notifyAndExit() runtime primitive. 156 157 bool ObjectSynchronizer::quick_notify(oopDesc * obj, Thread * self, bool all) { 158 assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); 159 assert(self->is_Java_thread(), "invariant"); 160 assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant"); 161 NoSafepointVerifier nsv; 162 if (obj == NULL) return false; // slow-path for invalid obj 163 const markOop mark = obj->mark(); 164 165 if (mark->has_locker() && self->is_lock_owned((address)mark->locker())) { 166 // Degenerate notify 167 // stack-locked by caller so by definition the implied waitset is empty. 168 return true; 169 } 170 171 if (mark->has_monitor()) { 172 ObjectMonitor * const mon = mark->monitor(); 173 assert(oopDesc::equals((oop) mon->object(), obj), "invariant"); 174 if (mon->owner() != self) return false; // slow-path for IMS exception 175 176 if (mon->first_waiter() != NULL) { 177 // We have one or more waiters. Since this is an inflated monitor 178 // that we own, we can transfer one or more threads from the waitset 179 // to the entrylist here and now, avoiding the slow-path. 180 if (all) { 181 DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self); 182 } else { 183 DTRACE_MONITOR_PROBE(notify, mon, obj, self); 184 } 185 int tally = 0; 186 do { 187 mon->INotify(self); 188 ++tally; 189 } while (mon->first_waiter() != NULL && all); 190 OM_PERFDATA_OP(Notifications, inc(tally)); 191 } 192 return true; 193 } 194 195 // biased locking and any other IMS exception states take the slow-path 196 return false; 197 } 198 199 200 // The LockNode emitted directly at the synchronization site would have 201 // been too big if it were to have included support for the cases of inflated 202 // recursive enter and exit, so they go here instead. 203 // Note that we can't safely call AsyncPrintJavaStack() from within 204 // quick_enter() as our thread state remains _in_Java. 205 206 bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self, 207 BasicLock * lock) { 208 assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); 209 assert(Self->is_Java_thread(), "invariant"); 210 assert(((JavaThread *) Self)->thread_state() == _thread_in_Java, "invariant"); 211 NoSafepointVerifier nsv; 212 if (obj == NULL) return false; // Need to throw NPE 213 const markOop mark = obj->mark(); 214 215 if (mark->has_monitor()) { 216 ObjectMonitor * const m = mark->monitor(); 217 assert(oopDesc::equals((oop) m->object(), obj), "invariant"); 218 Thread * const owner = (Thread *) m->_owner; 219 220 // Lock contention and Transactional Lock Elision (TLE) diagnostics 221 // and observability 222 // Case: light contention possibly amenable to TLE 223 // Case: TLE inimical operations such as nested/recursive synchronization 224 225 if (owner == Self) { 226 m->_recursions++; 227 return true; 228 } 229 230 // This Java Monitor is inflated so obj's header will never be 231 // displaced to this thread's BasicLock. Make the displaced header 232 // non-NULL so this BasicLock is not seen as recursive nor as 233 // being locked. We do this unconditionally so that this thread's 234 // BasicLock cannot be mis-interpreted by any stack walkers. For 235 // performance reasons, stack walkers generally first check for 236 // Biased Locking in the object's header, the second check is for 237 // stack-locking in the object's header, the third check is for 238 // recursive stack-locking in the displaced header in the BasicLock, 239 // and last are the inflated Java Monitor (ObjectMonitor) checks. 240 lock->set_displaced_header(markOopDesc::unused_mark()); 241 242 if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) { 243 assert(m->_recursions == 0, "invariant"); 244 assert(m->_owner == Self, "invariant"); 245 return true; 246 } 247 } 248 249 // Note that we could inflate in quick_enter. 250 // This is likely a useful optimization 251 // Critically, in quick_enter() we must not: 252 // -- perform bias revocation, or 253 // -- block indefinitely, or 254 // -- reach a safepoint 255 256 return false; // revert to slow-path 257 } 258 259 // ----------------------------------------------------------------------------- 260 // Fast Monitor Enter/Exit 261 // This the fast monitor enter. The interpreter and compiler use 262 // some assembly copies of this code. Make sure update those code 263 // if the following function is changed. The implementation is 264 // extremely sensitive to race condition. Be careful. 265 266 void ObjectSynchronizer::fast_enter(Handle obj, BasicLock* lock, 267 bool attempt_rebias, TRAPS) { 268 if (UseBiasedLocking) { 269 if (!SafepointSynchronize::is_at_safepoint()) { 270 BiasedLocking::Condition cond = BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD); 271 if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) { 272 return; 273 } 274 } else { 275 assert(!attempt_rebias, "can not rebias toward VM thread"); 276 BiasedLocking::revoke_at_safepoint(obj); 277 } 278 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 279 } 280 281 slow_enter(obj, lock, THREAD); 282 } 283 284 void ObjectSynchronizer::fast_exit(oop object, BasicLock* lock, TRAPS) { 285 markOop mark = object->mark(); 286 // We cannot check for Biased Locking if we are racing an inflation. 287 assert(mark == markOopDesc::INFLATING() || 288 !mark->has_bias_pattern(), "should not see bias pattern here"); 289 290 markOop dhw = lock->displaced_header(); 291 if (dhw == NULL) { 292 // If the displaced header is NULL, then this exit matches up with 293 // a recursive enter. No real work to do here except for diagnostics. 294 #ifndef PRODUCT 295 if (mark != markOopDesc::INFLATING()) { 296 // Only do diagnostics if we are not racing an inflation. Simply 297 // exiting a recursive enter of a Java Monitor that is being 298 // inflated is safe; see the has_monitor() comment below. 299 assert(!mark->is_neutral(), "invariant"); 300 assert(!mark->has_locker() || 301 THREAD->is_lock_owned((address)mark->locker()), "invariant"); 302 if (mark->has_monitor()) { 303 // The BasicLock's displaced_header is marked as a recursive 304 // enter and we have an inflated Java Monitor (ObjectMonitor). 305 // This is a special case where the Java Monitor was inflated 306 // after this thread entered the stack-lock recursively. When a 307 // Java Monitor is inflated, we cannot safely walk the Java 308 // Monitor owner's stack and update the BasicLocks because a 309 // Java Monitor can be asynchronously inflated by a thread that 310 // does not own the Java Monitor. 311 ObjectMonitor * m = mark->monitor(); 312 assert(((oop)(m->object()))->mark() == mark, "invariant"); 313 assert(m->is_entered(THREAD), "invariant"); 314 } 315 } 316 #endif 317 return; 318 } 319 320 if (mark == (markOop) lock) { 321 // If the object is stack-locked by the current thread, try to 322 // swing the displaced header from the BasicLock back to the mark. 323 assert(dhw->is_neutral(), "invariant"); 324 if (object->cas_set_mark(dhw, mark) == mark) { 325 return; 326 } 327 } 328 329 // We have to take the slow-path of possible inflation and then exit. 330 inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD); 331 } 332 333 // ----------------------------------------------------------------------------- 334 // Interpreter/Compiler Slow Case 335 // This routine is used to handle interpreter/compiler slow case 336 // We don't need to use fast path here, because it must have been 337 // failed in the interpreter/compiler code. 338 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) { 339 markOop mark = obj->mark(); 340 assert(!mark->has_bias_pattern(), "should not see bias pattern here"); 341 342 if (mark->is_neutral()) { 343 // Anticipate successful CAS -- the ST of the displaced mark must 344 // be visible <= the ST performed by the CAS. 345 lock->set_displaced_header(mark); 346 if (mark == obj()->cas_set_mark((markOop) lock, mark)) { 347 return; 348 } 349 // Fall through to inflate() ... 350 } else if (mark->has_locker() && 351 THREAD->is_lock_owned((address)mark->locker())) { 352 assert(lock != mark->locker(), "must not re-lock the same lock"); 353 assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock"); 354 lock->set_displaced_header(NULL); 355 return; 356 } 357 358 // The object header will never be displaced to this lock, 359 // so it does not matter what the value is, except that it 360 // must be non-zero to avoid looking like a re-entrant lock, 361 // and must not look locked either. 362 lock->set_displaced_header(markOopDesc::unused_mark()); 363 inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD); 364 } 365 366 // This routine is used to handle interpreter/compiler slow case 367 // We don't need to use fast path here, because it must have 368 // failed in the interpreter/compiler code. Simply use the heavy 369 // weight monitor should be ok, unless someone find otherwise. 370 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) { 371 fast_exit(object, lock, THREAD); 372 } 373 374 // ----------------------------------------------------------------------------- 375 // Class Loader support to workaround deadlocks on the class loader lock objects 376 // Also used by GC 377 // complete_exit()/reenter() are used to wait on a nested lock 378 // i.e. to give up an outer lock completely and then re-enter 379 // Used when holding nested locks - lock acquisition order: lock1 then lock2 380 // 1) complete_exit lock1 - saving recursion count 381 // 2) wait on lock2 382 // 3) when notified on lock2, unlock lock2 383 // 4) reenter lock1 with original recursion count 384 // 5) lock lock2 385 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() 386 intptr_t ObjectSynchronizer::complete_exit(Handle obj, TRAPS) { 387 if (UseBiasedLocking) { 388 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 389 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 390 } 391 392 ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal); 393 394 return monitor->complete_exit(THREAD); 395 } 396 397 // NOTE: must use heavy weight monitor to handle complete_exit/reenter() 398 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) { 399 if (UseBiasedLocking) { 400 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 401 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 402 } 403 404 ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal); 405 406 monitor->reenter(recursion, THREAD); 407 } 408 // ----------------------------------------------------------------------------- 409 // JNI locks on java objects 410 // NOTE: must use heavy weight monitor to handle jni monitor enter 411 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) { 412 // the current locking is from JNI instead of Java code 413 if (UseBiasedLocking) { 414 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 415 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 416 } 417 THREAD->set_current_pending_monitor_is_from_java(false); 418 inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD); 419 THREAD->set_current_pending_monitor_is_from_java(true); 420 } 421 422 // NOTE: must use heavy weight monitor to handle jni monitor exit 423 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) { 424 if (UseBiasedLocking) { 425 Handle h_obj(THREAD, obj); 426 BiasedLocking::revoke_and_rebias(h_obj, false, THREAD); 427 obj = h_obj(); 428 } 429 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 430 431 ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit); 432 // If this thread has locked the object, exit the monitor. Note: can't use 433 // monitor->check(CHECK); must exit even if an exception is pending. 434 if (monitor->check(THREAD)) { 435 monitor->exit(true, THREAD); 436 } 437 } 438 439 // ----------------------------------------------------------------------------- 440 // Internal VM locks on java objects 441 // standard constructor, allows locking failures 442 ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool doLock) { 443 _dolock = doLock; 444 _thread = thread; 445 debug_only(if (StrictSafepointChecks) _thread->check_for_valid_safepoint_state(false);) 446 _obj = obj; 447 448 if (_dolock) { 449 ObjectSynchronizer::fast_enter(_obj, &_lock, false, _thread); 450 } 451 } 452 453 ObjectLocker::~ObjectLocker() { 454 if (_dolock) { 455 ObjectSynchronizer::fast_exit(_obj(), &_lock, _thread); 456 } 457 } 458 459 460 // ----------------------------------------------------------------------------- 461 // Wait/Notify/NotifyAll 462 // NOTE: must use heavy weight monitor to handle wait() 463 int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) { 464 if (UseBiasedLocking) { 465 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 466 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 467 } 468 if (millis < 0) { 469 THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 470 } 471 ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait); 472 473 DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis); 474 monitor->wait(millis, true, THREAD); 475 476 // This dummy call is in place to get around dtrace bug 6254741. Once 477 // that's fixed we can uncomment the following line, remove the call 478 // and change this function back into a "void" func. 479 // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD); 480 return dtrace_waited_probe(monitor, obj, THREAD); 481 } 482 483 void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) { 484 if (UseBiasedLocking) { 485 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 486 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 487 } 488 if (millis < 0) { 489 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative"); 490 } 491 inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD); 492 } 493 494 void ObjectSynchronizer::notify(Handle obj, TRAPS) { 495 if (UseBiasedLocking) { 496 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 497 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 498 } 499 500 markOop mark = obj->mark(); 501 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 502 return; 503 } 504 inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD); 505 } 506 507 // NOTE: see comment of notify() 508 void ObjectSynchronizer::notifyall(Handle obj, TRAPS) { 509 if (UseBiasedLocking) { 510 BiasedLocking::revoke_and_rebias(obj, false, THREAD); 511 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 512 } 513 514 markOop mark = obj->mark(); 515 if (mark->has_locker() && THREAD->is_lock_owned((address)mark->locker())) { 516 return; 517 } 518 inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD); 519 } 520 521 // ----------------------------------------------------------------------------- 522 // Hash Code handling 523 // 524 // Performance concern: 525 // OrderAccess::storestore() calls release() which at one time stored 0 526 // into the global volatile OrderAccess::dummy variable. This store was 527 // unnecessary for correctness. Many threads storing into a common location 528 // causes considerable cache migration or "sloshing" on large SMP systems. 529 // As such, I avoided using OrderAccess::storestore(). In some cases 530 // OrderAccess::fence() -- which incurs local latency on the executing 531 // processor -- is a better choice as it scales on SMP systems. 532 // 533 // See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for 534 // a discussion of coherency costs. Note that all our current reference 535 // platforms provide strong ST-ST order, so the issue is moot on IA32, 536 // x64, and SPARC. 537 // 538 // As a general policy we use "volatile" to control compiler-based reordering 539 // and explicit fences (barriers) to control for architectural reordering 540 // performed by the CPU(s) or platform. 541 542 struct SharedGlobals { 543 char _pad_prefix[DEFAULT_CACHE_LINE_SIZE]; 544 // These are highly shared mostly-read variables. 545 // To avoid false-sharing they need to be the sole occupants of a cache line. 546 volatile int stwRandom; 547 volatile int stwCycle; 548 DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int) * 2); 549 // Hot RW variable -- Sequester to avoid false-sharing 550 volatile int hcSequence; 551 DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile int)); 552 }; 553 554 static SharedGlobals GVars; 555 static int MonitorScavengeThreshold = 1000000; 556 static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending 557 558 static markOop ReadStableMark(oop obj) { 559 markOop mark = obj->mark(); 560 if (!mark->is_being_inflated()) { 561 return mark; // normal fast-path return 562 } 563 564 int its = 0; 565 for (;;) { 566 markOop mark = obj->mark(); 567 if (!mark->is_being_inflated()) { 568 return mark; // normal fast-path return 569 } 570 571 // The object is being inflated by some other thread. 572 // The caller of ReadStableMark() must wait for inflation to complete. 573 // Avoid live-lock 574 // TODO: consider calling SafepointSynchronize::do_call_back() while 575 // spinning to see if there's a safepoint pending. If so, immediately 576 // yielding or blocking would be appropriate. Avoid spinning while 577 // there is a safepoint pending. 578 // TODO: add inflation contention performance counters. 579 // TODO: restrict the aggregate number of spinners. 580 581 ++its; 582 if (its > 10000 || !os::is_MP()) { 583 if (its & 1) { 584 os::naked_yield(); 585 } else { 586 // Note that the following code attenuates the livelock problem but is not 587 // a complete remedy. A more complete solution would require that the inflating 588 // thread hold the associated inflation lock. The following code simply restricts 589 // the number of spinners to at most one. We'll have N-2 threads blocked 590 // on the inflationlock, 1 thread holding the inflation lock and using 591 // a yield/park strategy, and 1 thread in the midst of inflation. 592 // A more refined approach would be to change the encoding of INFLATING 593 // to allow encapsulation of a native thread pointer. Threads waiting for 594 // inflation to complete would use CAS to push themselves onto a singly linked 595 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag 596 // and calling park(). When inflation was complete the thread that accomplished inflation 597 // would detach the list and set the markword to inflated with a single CAS and 598 // then for each thread on the list, set the flag and unpark() the thread. 599 // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease 600 // wakes at most one thread whereas we need to wake the entire list. 601 int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1); 602 int YieldThenBlock = 0; 603 assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant"); 604 assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant"); 605 Thread::muxAcquire(gInflationLocks + ix, "gInflationLock"); 606 while (obj->mark() == markOopDesc::INFLATING()) { 607 // Beware: NakedYield() is advisory and has almost no effect on some platforms 608 // so we periodically call Self->_ParkEvent->park(1). 609 // We use a mixed spin/yield/block mechanism. 610 if ((YieldThenBlock++) >= 16) { 611 Thread::current()->_ParkEvent->park(1); 612 } else { 613 os::naked_yield(); 614 } 615 } 616 Thread::muxRelease(gInflationLocks + ix); 617 } 618 } else { 619 SpinPause(); // SMP-polite spinning 620 } 621 } 622 } 623 624 // hashCode() generation : 625 // 626 // Possibilities: 627 // * MD5Digest of {obj,stwRandom} 628 // * CRC32 of {obj,stwRandom} or any linear-feedback shift register function. 629 // * A DES- or AES-style SBox[] mechanism 630 // * One of the Phi-based schemes, such as: 631 // 2654435761 = 2^32 * Phi (golden ratio) 632 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stwRandom ; 633 // * A variation of Marsaglia's shift-xor RNG scheme. 634 // * (obj ^ stwRandom) is appealing, but can result 635 // in undesirable regularity in the hashCode values of adjacent objects 636 // (objects allocated back-to-back, in particular). This could potentially 637 // result in hashtable collisions and reduced hashtable efficiency. 638 // There are simple ways to "diffuse" the middle address bits over the 639 // generated hashCode values: 640 641 static inline intptr_t get_next_hash(Thread * Self, oop obj) { 642 intptr_t value = 0; 643 if (hashCode == 0) { 644 // This form uses global Park-Miller RNG. 645 // On MP system we'll have lots of RW access to a global, so the 646 // mechanism induces lots of coherency traffic. 647 value = os::random(); 648 } else if (hashCode == 1) { 649 // This variation has the property of being stable (idempotent) 650 // between STW operations. This can be useful in some of the 1-0 651 // synchronization schemes. 652 intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3; 653 value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom; 654 } else if (hashCode == 2) { 655 value = 1; // for sensitivity testing 656 } else if (hashCode == 3) { 657 value = ++GVars.hcSequence; 658 } else if (hashCode == 4) { 659 value = cast_from_oop<intptr_t>(obj); 660 } else { 661 // Marsaglia's xor-shift scheme with thread-specific state 662 // This is probably the best overall implementation -- we'll 663 // likely make this the default in future releases. 664 unsigned t = Self->_hashStateX; 665 t ^= (t << 11); 666 Self->_hashStateX = Self->_hashStateY; 667 Self->_hashStateY = Self->_hashStateZ; 668 Self->_hashStateZ = Self->_hashStateW; 669 unsigned v = Self->_hashStateW; 670 v = (v ^ (v >> 19)) ^ (t ^ (t >> 8)); 671 Self->_hashStateW = v; 672 value = v; 673 } 674 675 value &= markOopDesc::hash_mask; 676 if (value == 0) value = 0xBAD; 677 assert(value != markOopDesc::no_hash, "invariant"); 678 return value; 679 } 680 681 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) { 682 if (UseBiasedLocking) { 683 // NOTE: many places throughout the JVM do not expect a safepoint 684 // to be taken here, in particular most operations on perm gen 685 // objects. However, we only ever bias Java instances and all of 686 // the call sites of identity_hash that might revoke biases have 687 // been checked to make sure they can handle a safepoint. The 688 // added check of the bias pattern is to avoid useless calls to 689 // thread-local storage. 690 if (obj->mark()->has_bias_pattern()) { 691 // Handle for oop obj in case of STW safepoint 692 Handle hobj(Self, obj); 693 // Relaxing assertion for bug 6320749. 694 assert(Universe::verify_in_progress() || 695 !SafepointSynchronize::is_at_safepoint(), 696 "biases should not be seen by VM thread here"); 697 BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current()); 698 obj = hobj(); 699 assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 700 } 701 } 702 703 // hashCode() is a heap mutator ... 704 // Relaxing assertion for bug 6320749. 705 assert(Universe::verify_in_progress() || DumpSharedSpaces || 706 !SafepointSynchronize::is_at_safepoint(), "invariant"); 707 assert(Universe::verify_in_progress() || DumpSharedSpaces || 708 Self->is_Java_thread() , "invariant"); 709 assert(Universe::verify_in_progress() || DumpSharedSpaces || 710 ((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant"); 711 712 ObjectMonitor* monitor = NULL; 713 markOop temp, test; 714 intptr_t hash; 715 markOop mark = ReadStableMark(obj); 716 717 // object should remain ineligible for biased locking 718 assert(!mark->has_bias_pattern(), "invariant"); 719 720 if (mark->is_neutral()) { 721 hash = mark->hash(); // this is a normal header 722 if (hash != 0) { // if it has hash, just return it 723 return hash; 724 } 725 hash = get_next_hash(Self, obj); // allocate a new hash code 726 temp = mark->copy_set_hash(hash); // merge the hash code into header 727 // use (machine word version) atomic operation to install the hash 728 test = obj->cas_set_mark(temp, mark); 729 if (test == mark) { 730 return hash; 731 } 732 // If atomic operation failed, we must inflate the header 733 // into heavy weight monitor. We could add more code here 734 // for fast path, but it does not worth the complexity. 735 } else if (mark->has_monitor()) { 736 monitor = mark->monitor(); 737 temp = monitor->header(); 738 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp)); 739 hash = temp->hash(); 740 if (hash != 0) { 741 return hash; 742 } 743 // Skip to the following code to reduce code size 744 } else if (Self->is_lock_owned((address)mark->locker())) { 745 temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned 746 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp)); 747 hash = temp->hash(); // by current thread, check if the displaced 748 if (hash != 0) { // header contains hash code 749 return hash; 750 } 751 // WARNING: 752 // The displaced header is strictly immutable. 753 // It can NOT be changed in ANY cases. So we have 754 // to inflate the header into heavyweight monitor 755 // even the current thread owns the lock. The reason 756 // is the BasicLock (stack slot) will be asynchronously 757 // read by other threads during the inflate() function. 758 // Any change to stack may not propagate to other threads 759 // correctly. 760 } 761 762 // Inflate the monitor to set hash code 763 monitor = inflate(Self, obj, inflate_cause_hash_code); 764 // Load displaced header and check it has hash code 765 mark = monitor->header(); 766 assert(mark->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)mark)); 767 hash = mark->hash(); 768 if (hash == 0) { 769 hash = get_next_hash(Self, obj); 770 temp = mark->copy_set_hash(hash); // merge hash code into header 771 assert(temp->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)temp)); 772 test = Atomic::cmpxchg(temp, monitor->header_addr(), mark); 773 if (test != mark) { 774 // The only update to the header in the monitor (outside GC) 775 // is install the hash code. If someone add new usage of 776 // displaced header, please update this code 777 hash = test->hash(); 778 assert(test->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)test)); 779 assert(hash != 0, "Trivial unexpected object/monitor header usage."); 780 } 781 } 782 // We finally get the hash 783 return hash; 784 } 785 786 // Deprecated -- use FastHashCode() instead. 787 788 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) { 789 return FastHashCode(Thread::current(), obj()); 790 } 791 792 793 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread, 794 Handle h_obj) { 795 if (UseBiasedLocking) { 796 BiasedLocking::revoke_and_rebias(h_obj, false, thread); 797 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 798 } 799 800 assert(thread == JavaThread::current(), "Can only be called on current thread"); 801 oop obj = h_obj(); 802 803 markOop mark = ReadStableMark(obj); 804 805 // Uncontended case, header points to stack 806 if (mark->has_locker()) { 807 return thread->is_lock_owned((address)mark->locker()); 808 } 809 // Contended case, header points to ObjectMonitor (tagged pointer) 810 if (mark->has_monitor()) { 811 ObjectMonitor* monitor = mark->monitor(); 812 return monitor->is_entered(thread) != 0; 813 } 814 // Unlocked case, header in place 815 assert(mark->is_neutral(), "sanity check"); 816 return false; 817 } 818 819 // Be aware of this method could revoke bias of the lock object. 820 // This method queries the ownership of the lock handle specified by 'h_obj'. 821 // If the current thread owns the lock, it returns owner_self. If no 822 // thread owns the lock, it returns owner_none. Otherwise, it will return 823 // owner_other. 824 ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership 825 (JavaThread *self, Handle h_obj) { 826 // The caller must beware this method can revoke bias, and 827 // revocation can result in a safepoint. 828 assert(!SafepointSynchronize::is_at_safepoint(), "invariant"); 829 assert(self->thread_state() != _thread_blocked, "invariant"); 830 831 // Possible mark states: neutral, biased, stack-locked, inflated 832 833 if (UseBiasedLocking && h_obj()->mark()->has_bias_pattern()) { 834 // CASE: biased 835 BiasedLocking::revoke_and_rebias(h_obj, false, self); 836 assert(!h_obj->mark()->has_bias_pattern(), 837 "biases should be revoked by now"); 838 } 839 840 assert(self == JavaThread::current(), "Can only be called on current thread"); 841 oop obj = h_obj(); 842 markOop mark = ReadStableMark(obj); 843 844 // CASE: stack-locked. Mark points to a BasicLock on the owner's stack. 845 if (mark->has_locker()) { 846 return self->is_lock_owned((address)mark->locker()) ? 847 owner_self : owner_other; 848 } 849 850 // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor. 851 // The Object:ObjectMonitor relationship is stable as long as we're 852 // not at a safepoint. 853 if (mark->has_monitor()) { 854 void * owner = mark->monitor()->_owner; 855 if (owner == NULL) return owner_none; 856 return (owner == self || 857 self->is_lock_owned((address)owner)) ? owner_self : owner_other; 858 } 859 860 // CASE: neutral 861 assert(mark->is_neutral(), "sanity check"); 862 return owner_none; // it's unlocked 863 } 864 865 // FIXME: jvmti should call this 866 JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) { 867 if (UseBiasedLocking) { 868 if (SafepointSynchronize::is_at_safepoint()) { 869 BiasedLocking::revoke_at_safepoint(h_obj); 870 } else { 871 BiasedLocking::revoke_and_rebias(h_obj, false, JavaThread::current()); 872 } 873 assert(!h_obj->mark()->has_bias_pattern(), "biases should be revoked by now"); 874 } 875 876 oop obj = h_obj(); 877 address owner = NULL; 878 879 markOop mark = ReadStableMark(obj); 880 881 // Uncontended case, header points to stack 882 if (mark->has_locker()) { 883 owner = (address) mark->locker(); 884 } 885 886 // Contended case, header points to ObjectMonitor (tagged pointer) 887 else if (mark->has_monitor()) { 888 ObjectMonitor* monitor = mark->monitor(); 889 assert(monitor != NULL, "monitor should be non-null"); 890 owner = (address) monitor->owner(); 891 } 892 893 if (owner != NULL) { 894 // owning_thread_from_monitor_owner() may also return NULL here 895 return Threads::owning_thread_from_monitor_owner(t_list, owner); 896 } 897 898 // Unlocked case, header in place 899 // Cannot have assertion since this object may have been 900 // locked by another thread when reaching here. 901 // assert(mark->is_neutral(), "sanity check"); 902 903 return NULL; 904 } 905 906 // Visitors ... 907 908 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) { 909 PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList); 910 while (block != NULL) { 911 assert(block->object() == CHAINMARKER, "must be a block header"); 912 for (int i = _BLOCKSIZE - 1; i > 0; i--) { 913 ObjectMonitor* mid = (ObjectMonitor *)(block + i); 914 oop object = (oop)mid->object(); 915 if (object != NULL) { 916 closure->do_monitor(mid); 917 } 918 } 919 block = (PaddedEnd<ObjectMonitor> *)block->FreeNext; 920 } 921 } 922 923 // Get the next block in the block list. 924 static inline PaddedEnd<ObjectMonitor>* next(PaddedEnd<ObjectMonitor>* block) { 925 assert(block->object() == CHAINMARKER, "must be a block header"); 926 block = (PaddedEnd<ObjectMonitor>*) block->FreeNext; 927 assert(block == NULL || block->object() == CHAINMARKER, "must be a block header"); 928 return block; 929 } 930 931 static bool monitors_used_above_threshold() { 932 if (gMonitorPopulation == 0) { 933 return false; 934 } 935 int monitors_used = gMonitorPopulation - gMonitorFreeCount; 936 int monitor_usage = (monitors_used * 100LL) / gMonitorPopulation; 937 return monitor_usage > MonitorUsedDeflationThreshold; 938 } 939 940 bool ObjectSynchronizer::is_cleanup_needed() { 941 if (MonitorUsedDeflationThreshold > 0) { 942 return monitors_used_above_threshold(); 943 } 944 return false; 945 } 946 947 void ObjectSynchronizer::oops_do(OopClosure* f) { 948 // We only scan the global used list here (for moribund threads), and 949 // the thread-local monitors in Thread::oops_do(). 950 global_used_oops_do(f); 951 } 952 953 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) { 954 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 955 list_oops_do(gOmInUseList, f); 956 } 957 958 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) { 959 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 960 list_oops_do(thread->omInUseList, f); 961 } 962 963 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) { 964 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 965 ObjectMonitor* mid; 966 for (mid = list; mid != NULL; mid = mid->FreeNext) { 967 if (mid->object() != NULL) { 968 f->do_oop((oop*)mid->object_addr()); 969 } 970 } 971 } 972 973 974 // ----------------------------------------------------------------------------- 975 // ObjectMonitor Lifecycle 976 // ----------------------- 977 // Inflation unlinks monitors from the global gFreeList and 978 // associates them with objects. Deflation -- which occurs at 979 // STW-time -- disassociates idle monitors from objects. Such 980 // scavenged monitors are returned to the gFreeList. 981 // 982 // The global list is protected by gListLock. All the critical sections 983 // are short and operate in constant-time. 984 // 985 // ObjectMonitors reside in type-stable memory (TSM) and are immortal. 986 // 987 // Lifecycle: 988 // -- unassigned and on the global free list 989 // -- unassigned and on a thread's private omFreeList 990 // -- assigned to an object. The object is inflated and the mark refers 991 // to the objectmonitor. 992 993 994 // Constraining monitor pool growth via MonitorBound ... 995 // 996 // The monitor pool is grow-only. We scavenge at STW safepoint-time, but the 997 // the rate of scavenging is driven primarily by GC. As such, we can find 998 // an inordinate number of monitors in circulation. 999 // To avoid that scenario we can artificially induce a STW safepoint 1000 // if the pool appears to be growing past some reasonable bound. 1001 // Generally we favor time in space-time tradeoffs, but as there's no 1002 // natural back-pressure on the # of extant monitors we need to impose some 1003 // type of limit. Beware that if MonitorBound is set to too low a value 1004 // we could just loop. In addition, if MonitorBound is set to a low value 1005 // we'll incur more safepoints, which are harmful to performance. 1006 // See also: GuaranteedSafepointInterval 1007 // 1008 // The current implementation uses asynchronous VM operations. 1009 1010 static void InduceScavenge(Thread * Self, const char * Whence) { 1011 // Induce STW safepoint to trim monitors 1012 // Ultimately, this results in a call to deflate_idle_monitors() in the near future. 1013 // More precisely, trigger an asynchronous STW safepoint as the number 1014 // of active monitors passes the specified threshold. 1015 // TODO: assert thread state is reasonable 1016 1017 if (ForceMonitorScavenge == 0 && Atomic::xchg (1, &ForceMonitorScavenge) == 0) { 1018 // Induce a 'null' safepoint to scavenge monitors 1019 // Must VM_Operation instance be heap allocated as the op will be enqueue and posted 1020 // to the VMthread and have a lifespan longer than that of this activation record. 1021 // The VMThread will delete the op when completed. 1022 VMThread::execute(new VM_ScavengeMonitors()); 1023 } 1024 } 1025 1026 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) { 1027 // A large MAXPRIVATE value reduces both list lock contention 1028 // and list coherency traffic, but also tends to increase the 1029 // number of objectMonitors in circulation as well as the STW 1030 // scavenge costs. As usual, we lean toward time in space-time 1031 // tradeoffs. 1032 const int MAXPRIVATE = 1024; 1033 for (;;) { 1034 ObjectMonitor * m; 1035 1036 // 1: try to allocate from the thread's local omFreeList. 1037 // Threads will attempt to allocate first from their local list, then 1038 // from the global list, and only after those attempts fail will the thread 1039 // attempt to instantiate new monitors. Thread-local free lists take 1040 // heat off the gListLock and improve allocation latency, as well as reducing 1041 // coherency traffic on the shared global list. 1042 m = Self->omFreeList; 1043 if (m != NULL) { 1044 Self->omFreeList = m->FreeNext; 1045 Self->omFreeCount--; 1046 guarantee(m->object() == NULL, "invariant"); 1047 m->FreeNext = Self->omInUseList; 1048 Self->omInUseList = m; 1049 Self->omInUseCount++; 1050 return m; 1051 } 1052 1053 // 2: try to allocate from the global gFreeList 1054 // CONSIDER: use muxTry() instead of muxAcquire(). 1055 // If the muxTry() fails then drop immediately into case 3. 1056 // If we're using thread-local free lists then try 1057 // to reprovision the caller's free list. 1058 if (gFreeList != NULL) { 1059 // Reprovision the thread's omFreeList. 1060 // Use bulk transfers to reduce the allocation rate and heat 1061 // on various locks. 1062 Thread::muxAcquire(&gListLock, "omAlloc(1)"); 1063 for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) { 1064 gMonitorFreeCount--; 1065 ObjectMonitor * take = gFreeList; 1066 gFreeList = take->FreeNext; 1067 guarantee(take->object() == NULL, "invariant"); 1068 guarantee(!take->is_busy(), "invariant"); 1069 take->Recycle(); 1070 omRelease(Self, take, false); 1071 } 1072 Thread::muxRelease(&gListLock); 1073 Self->omFreeProvision += 1 + (Self->omFreeProvision/2); 1074 if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE; 1075 1076 const int mx = MonitorBound; 1077 if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) { 1078 // We can't safely induce a STW safepoint from omAlloc() as our thread 1079 // state may not be appropriate for such activities and callers may hold 1080 // naked oops, so instead we defer the action. 1081 InduceScavenge(Self, "omAlloc"); 1082 } 1083 continue; 1084 } 1085 1086 // 3: allocate a block of new ObjectMonitors 1087 // Both the local and global free lists are empty -- resort to malloc(). 1088 // In the current implementation objectMonitors are TSM - immortal. 1089 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE], but we want 1090 // each ObjectMonitor to start at the beginning of a cache line, 1091 // so we use align_up(). 1092 // A better solution would be to use C++ placement-new. 1093 // BEWARE: As it stands currently, we don't run the ctors! 1094 assert(_BLOCKSIZE > 1, "invariant"); 1095 size_t neededsize = sizeof(PaddedEnd<ObjectMonitor>) * _BLOCKSIZE; 1096 PaddedEnd<ObjectMonitor> * temp; 1097 size_t aligned_size = neededsize + (DEFAULT_CACHE_LINE_SIZE - 1); 1098 void* real_malloc_addr = (void *)NEW_C_HEAP_ARRAY(char, aligned_size, 1099 mtInternal); 1100 temp = (PaddedEnd<ObjectMonitor> *) 1101 align_up(real_malloc_addr, DEFAULT_CACHE_LINE_SIZE); 1102 1103 // NOTE: (almost) no way to recover if allocation failed. 1104 // We might be able to induce a STW safepoint and scavenge enough 1105 // objectMonitors to permit progress. 1106 if (temp == NULL) { 1107 vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR, 1108 "Allocate ObjectMonitors"); 1109 } 1110 (void)memset((void *) temp, 0, neededsize); 1111 1112 // Format the block. 1113 // initialize the linked list, each monitor points to its next 1114 // forming the single linked free list, the very first monitor 1115 // will points to next block, which forms the block list. 1116 // The trick of using the 1st element in the block as gBlockList 1117 // linkage should be reconsidered. A better implementation would 1118 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; } 1119 1120 for (int i = 1; i < _BLOCKSIZE; i++) { 1121 temp[i].FreeNext = (ObjectMonitor *)&temp[i+1]; 1122 } 1123 1124 // terminate the last monitor as the end of list 1125 temp[_BLOCKSIZE - 1].FreeNext = NULL; 1126 1127 // Element [0] is reserved for global list linkage 1128 temp[0].set_object(CHAINMARKER); 1129 1130 // Consider carving out this thread's current request from the 1131 // block in hand. This avoids some lock traffic and redundant 1132 // list activity. 1133 1134 // Acquire the gListLock to manipulate gBlockList and gFreeList. 1135 // An Oyama-Taura-Yonezawa scheme might be more efficient. 1136 Thread::muxAcquire(&gListLock, "omAlloc(2)"); 1137 gMonitorPopulation += _BLOCKSIZE-1; 1138 gMonitorFreeCount += _BLOCKSIZE-1; 1139 1140 // Add the new block to the list of extant blocks (gBlockList). 1141 // The very first objectMonitor in a block is reserved and dedicated. 1142 // It serves as blocklist "next" linkage. 1143 temp[0].FreeNext = gBlockList; 1144 // There are lock-free uses of gBlockList so make sure that 1145 // the previous stores happen before we update gBlockList. 1146 OrderAccess::release_store(&gBlockList, temp); 1147 1148 // Add the new string of objectMonitors to the global free list 1149 temp[_BLOCKSIZE - 1].FreeNext = gFreeList; 1150 gFreeList = temp + 1; 1151 Thread::muxRelease(&gListLock); 1152 } 1153 } 1154 1155 // Place "m" on the caller's private per-thread omFreeList. 1156 // In practice there's no need to clamp or limit the number of 1157 // monitors on a thread's omFreeList as the only time we'll call 1158 // omRelease is to return a monitor to the free list after a CAS 1159 // attempt failed. This doesn't allow unbounded #s of monitors to 1160 // accumulate on a thread's free list. 1161 // 1162 // Key constraint: all ObjectMonitors on a thread's free list and the global 1163 // free list must have their object field set to null. This prevents the 1164 // scavenger -- deflate_monitor_list() -- from reclaiming them. 1165 1166 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m, 1167 bool fromPerThreadAlloc) { 1168 guarantee(m->header() == NULL, "invariant"); 1169 guarantee(m->object() == NULL, "invariant"); 1170 guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor"); 1171 // Remove from omInUseList 1172 if (fromPerThreadAlloc) { 1173 ObjectMonitor* cur_mid_in_use = NULL; 1174 bool extracted = false; 1175 for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) { 1176 if (m == mid) { 1177 // extract from per-thread in-use list 1178 if (mid == Self->omInUseList) { 1179 Self->omInUseList = mid->FreeNext; 1180 } else if (cur_mid_in_use != NULL) { 1181 cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list 1182 } 1183 extracted = true; 1184 Self->omInUseCount--; 1185 break; 1186 } 1187 } 1188 assert(extracted, "Should have extracted from in-use list"); 1189 } 1190 1191 // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new 1192 m->FreeNext = Self->omFreeList; 1193 Self->omFreeList = m; 1194 Self->omFreeCount++; 1195 } 1196 1197 // Return the monitors of a moribund thread's local free list to 1198 // the global free list. Typically a thread calls omFlush() when 1199 // it's dying. We could also consider having the VM thread steal 1200 // monitors from threads that have not run java code over a few 1201 // consecutive STW safepoints. Relatedly, we might decay 1202 // omFreeProvision at STW safepoints. 1203 // 1204 // Also return the monitors of a moribund thread's omInUseList to 1205 // a global gOmInUseList under the global list lock so these 1206 // will continue to be scanned. 1207 // 1208 // We currently call omFlush() from Threads::remove() _before the thread 1209 // has been excised from the thread list and is no longer a mutator. 1210 // This means that omFlush() cannot run concurrently with a safepoint and 1211 // interleave with the deflate_idle_monitors scavenge operator. In particular, 1212 // this ensures that the thread's monitors are scanned by a GC safepoint, 1213 // either via Thread::oops_do() (if safepoint happens before omFlush()) or via 1214 // ObjectSynchronizer::oops_do() (if it happens after omFlush() and the thread's 1215 // monitors have been transferred to the global in-use list). 1216 1217 void ObjectSynchronizer::omFlush(Thread * Self) { 1218 ObjectMonitor * list = Self->omFreeList; // Null-terminated SLL 1219 ObjectMonitor * tail = NULL; 1220 int tally = 0; 1221 if (list != NULL) { 1222 ObjectMonitor * s; 1223 // The thread is going away, the per-thread free monitors 1224 // are freed via set_owner(NULL) 1225 // Link them to tail, which will be linked into the global free list 1226 // gFreeList below, under the gListLock 1227 for (s = list; s != NULL; s = s->FreeNext) { 1228 tally++; 1229 tail = s; 1230 guarantee(s->object() == NULL, "invariant"); 1231 guarantee(!s->is_busy(), "invariant"); 1232 s->set_owner(NULL); // redundant but good hygiene 1233 } 1234 guarantee(tail != NULL, "invariant"); 1235 assert(Self->omFreeCount == tally, "free-count off"); 1236 Self->omFreeList = NULL; 1237 Self->omFreeCount = 0; 1238 } 1239 1240 ObjectMonitor * inUseList = Self->omInUseList; 1241 ObjectMonitor * inUseTail = NULL; 1242 int inUseTally = 0; 1243 if (inUseList != NULL) { 1244 ObjectMonitor *cur_om; 1245 // The thread is going away, however the omInUseList inflated 1246 // monitors may still be in-use by other threads. 1247 // Link them to inUseTail, which will be linked into the global in-use list 1248 // gOmInUseList below, under the gListLock 1249 for (cur_om = inUseList; cur_om != NULL; cur_om = cur_om->FreeNext) { 1250 inUseTail = cur_om; 1251 inUseTally++; 1252 } 1253 guarantee(inUseTail != NULL, "invariant"); 1254 assert(Self->omInUseCount == inUseTally, "in-use count off"); 1255 Self->omInUseList = NULL; 1256 Self->omInUseCount = 0; 1257 } 1258 1259 Thread::muxAcquire(&gListLock, "omFlush"); 1260 if (tail != NULL) { 1261 tail->FreeNext = gFreeList; 1262 gFreeList = list; 1263 gMonitorFreeCount += tally; 1264 } 1265 1266 if (inUseTail != NULL) { 1267 inUseTail->FreeNext = gOmInUseList; 1268 gOmInUseList = inUseList; 1269 gOmInUseCount += inUseTally; 1270 } 1271 1272 Thread::muxRelease(&gListLock); 1273 } 1274 1275 static void post_monitor_inflate_event(EventJavaMonitorInflate* event, 1276 const oop obj, 1277 ObjectSynchronizer::InflateCause cause) { 1278 assert(event != NULL, "invariant"); 1279 assert(event->should_commit(), "invariant"); 1280 event->set_monitorClass(obj->klass()); 1281 event->set_address((uintptr_t)(void*)obj); 1282 event->set_cause((u1)cause); 1283 event->commit(); 1284 } 1285 1286 // Fast path code shared by multiple functions 1287 void ObjectSynchronizer::inflate_helper(oop obj) { 1288 markOop mark = obj->mark(); 1289 if (mark->has_monitor()) { 1290 assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid"); 1291 assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header"); 1292 return; 1293 } 1294 inflate(Thread::current(), obj, inflate_cause_vm_internal); 1295 } 1296 1297 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self, 1298 oop object, 1299 const InflateCause cause) { 1300 // Inflate mutates the heap ... 1301 // Relaxing assertion for bug 6320749. 1302 assert(Universe::verify_in_progress() || 1303 !SafepointSynchronize::is_at_safepoint(), "invariant"); 1304 1305 EventJavaMonitorInflate event; 1306 1307 for (;;) { 1308 const markOop mark = object->mark(); 1309 assert(!mark->has_bias_pattern(), "invariant"); 1310 1311 // The mark can be in one of the following states: 1312 // * Inflated - just return 1313 // * Stack-locked - coerce it to inflated 1314 // * INFLATING - busy wait for conversion to complete 1315 // * Neutral - aggressively inflate the object. 1316 // * BIASED - Illegal. We should never see this 1317 1318 // CASE: inflated 1319 if (mark->has_monitor()) { 1320 ObjectMonitor * inf = mark->monitor(); 1321 markOop dmw = inf->header(); 1322 assert(dmw->is_neutral(), "invariant: header=" INTPTR_FORMAT, p2i((address)dmw)); 1323 assert(oopDesc::equals((oop) inf->object(), object), "invariant"); 1324 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); 1325 return inf; 1326 } 1327 1328 // CASE: inflation in progress - inflating over a stack-lock. 1329 // Some other thread is converting from stack-locked to inflated. 1330 // Only that thread can complete inflation -- other threads must wait. 1331 // The INFLATING value is transient. 1332 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. 1333 // We could always eliminate polling by parking the thread on some auxiliary list. 1334 if (mark == markOopDesc::INFLATING()) { 1335 ReadStableMark(object); 1336 continue; 1337 } 1338 1339 // CASE: stack-locked 1340 // Could be stack-locked either by this thread or by some other thread. 1341 // 1342 // Note that we allocate the objectmonitor speculatively, _before_ attempting 1343 // to install INFLATING into the mark word. We originally installed INFLATING, 1344 // allocated the objectmonitor, and then finally STed the address of the 1345 // objectmonitor into the mark. This was correct, but artificially lengthened 1346 // the interval in which INFLATED appeared in the mark, thus increasing 1347 // the odds of inflation contention. 1348 // 1349 // We now use per-thread private objectmonitor free lists. 1350 // These list are reprovisioned from the global free list outside the 1351 // critical INFLATING...ST interval. A thread can transfer 1352 // multiple objectmonitors en-mass from the global free list to its local free list. 1353 // This reduces coherency traffic and lock contention on the global free list. 1354 // Using such local free lists, it doesn't matter if the omAlloc() call appears 1355 // before or after the CAS(INFLATING) operation. 1356 // See the comments in omAlloc(). 1357 1358 LogStreamHandle(Trace, monitorinflation) lsh; 1359 1360 if (mark->has_locker()) { 1361 ObjectMonitor * m = omAlloc(Self); 1362 // Optimistically prepare the objectmonitor - anticipate successful CAS 1363 // We do this before the CAS in order to minimize the length of time 1364 // in which INFLATING appears in the mark. 1365 m->Recycle(); 1366 m->_Responsible = NULL; 1367 m->_recursions = 0; 1368 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class 1369 1370 markOop cmp = object->cas_set_mark(markOopDesc::INFLATING(), mark); 1371 if (cmp != mark) { 1372 omRelease(Self, m, true); 1373 continue; // Interference -- just retry 1374 } 1375 1376 // We've successfully installed INFLATING (0) into the mark-word. 1377 // This is the only case where 0 will appear in a mark-word. 1378 // Only the singular thread that successfully swings the mark-word 1379 // to 0 can perform (or more precisely, complete) inflation. 1380 // 1381 // Why do we CAS a 0 into the mark-word instead of just CASing the 1382 // mark-word from the stack-locked value directly to the new inflated state? 1383 // Consider what happens when a thread unlocks a stack-locked object. 1384 // It attempts to use CAS to swing the displaced header value from the 1385 // on-stack basiclock back into the object header. Recall also that the 1386 // header value (hashcode, etc) can reside in (a) the object header, or 1387 // (b) a displaced header associated with the stack-lock, or (c) a displaced 1388 // header in an objectMonitor. The inflate() routine must copy the header 1389 // value from the basiclock on the owner's stack to the objectMonitor, all 1390 // the while preserving the hashCode stability invariants. If the owner 1391 // decides to release the lock while the value is 0, the unlock will fail 1392 // and control will eventually pass from slow_exit() to inflate. The owner 1393 // will then spin, waiting for the 0 value to disappear. Put another way, 1394 // the 0 causes the owner to stall if the owner happens to try to 1395 // drop the lock (restoring the header from the basiclock to the object) 1396 // while inflation is in-progress. This protocol avoids races that might 1397 // would otherwise permit hashCode values to change or "flicker" for an object. 1398 // Critically, while object->mark is 0 mark->displaced_mark_helper() is stable. 1399 // 0 serves as a "BUSY" inflate-in-progress indicator. 1400 1401 1402 // fetch the displaced mark from the owner's stack. 1403 // The owner can't die or unwind past the lock while our INFLATING 1404 // object is in the mark. Furthermore the owner can't complete 1405 // an unlock on the object, either. 1406 markOop dmw = mark->displaced_mark_helper(); 1407 assert(dmw->is_neutral(), "invariant"); 1408 1409 // Setup monitor fields to proper values -- prepare the monitor 1410 m->set_header(dmw); 1411 1412 // Optimization: if the mark->locker stack address is associated 1413 // with this thread we could simply set m->_owner = Self. 1414 // Note that a thread can inflate an object 1415 // that it has stack-locked -- as might happen in wait() -- directly 1416 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. 1417 m->set_owner(mark->locker()); 1418 m->set_object(object); 1419 // TODO-FIXME: assert BasicLock->dhw != 0. 1420 1421 // Must preserve store ordering. The monitor state must 1422 // be stable at the time of publishing the monitor address. 1423 guarantee(object->mark() == markOopDesc::INFLATING(), "invariant"); 1424 object->release_set_mark(markOopDesc::encode(m)); 1425 1426 // Hopefully the performance counters are allocated on distinct cache lines 1427 // to avoid false sharing on MP systems ... 1428 OM_PERFDATA_OP(Inflations, inc()); 1429 if (log_is_enabled(Trace, monitorinflation)) { 1430 ResourceMark rm(Self); 1431 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark=" 1432 INTPTR_FORMAT ", type='%s'", p2i(object), 1433 p2i(object->mark()), object->klass()->external_name()); 1434 } 1435 if (event.should_commit()) { 1436 post_monitor_inflate_event(&event, object, cause); 1437 } 1438 return m; 1439 } 1440 1441 // CASE: neutral 1442 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. 1443 // If we know we're inflating for entry it's better to inflate by swinging a 1444 // pre-locked objectMonitor pointer into the object header. A successful 1445 // CAS inflates the object *and* confers ownership to the inflating thread. 1446 // In the current implementation we use a 2-step mechanism where we CAS() 1447 // to inflate and then CAS() again to try to swing _owner from NULL to Self. 1448 // An inflateTry() method that we could call from fast_enter() and slow_enter() 1449 // would be useful. 1450 1451 assert(mark->is_neutral(), "invariant"); 1452 ObjectMonitor * m = omAlloc(Self); 1453 // prepare m for installation - set monitor to initial state 1454 m->Recycle(); 1455 m->set_header(mark); 1456 m->set_owner(NULL); 1457 m->set_object(object); 1458 m->_recursions = 0; 1459 m->_Responsible = NULL; 1460 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class 1461 1462 if (object->cas_set_mark(markOopDesc::encode(m), mark) != mark) { 1463 m->set_header(NULL); 1464 m->set_object(NULL); 1465 m->Recycle(); 1466 omRelease(Self, m, true); 1467 m = NULL; 1468 continue; 1469 // interference - the markword changed - just retry. 1470 // The state-transitions are one-way, so there's no chance of 1471 // live-lock -- "Inflated" is an absorbing state. 1472 } 1473 1474 // Hopefully the performance counters are allocated on distinct 1475 // cache lines to avoid false sharing on MP systems ... 1476 OM_PERFDATA_OP(Inflations, inc()); 1477 if (log_is_enabled(Trace, monitorinflation)) { 1478 ResourceMark rm(Self); 1479 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark=" 1480 INTPTR_FORMAT ", type='%s'", p2i(object), 1481 p2i(object->mark()), object->klass()->external_name()); 1482 } 1483 if (event.should_commit()) { 1484 post_monitor_inflate_event(&event, object, cause); 1485 } 1486 return m; 1487 } 1488 } 1489 1490 1491 // We create a list of in-use monitors for each thread. 1492 // 1493 // deflate_thread_local_monitors() scans a single thread's in-use list, while 1494 // deflate_idle_monitors() scans only a global list of in-use monitors which 1495 // is populated only as a thread dies (see omFlush()). 1496 // 1497 // These operations are called at all safepoints, immediately after mutators 1498 // are stopped, but before any objects have moved. Collectively they traverse 1499 // the population of in-use monitors, deflating where possible. The scavenged 1500 // monitors are returned to the monitor free list. 1501 // 1502 // Beware that we scavenge at *every* stop-the-world point. Having a large 1503 // number of monitors in-use could negatively impact performance. We also want 1504 // to minimize the total # of monitors in circulation, as they incur a small 1505 // footprint penalty. 1506 // 1507 // Perversely, the heap size -- and thus the STW safepoint rate -- 1508 // typically drives the scavenge rate. Large heaps can mean infrequent GC, 1509 // which in turn can mean large(r) numbers of objectmonitors in circulation. 1510 // This is an unfortunate aspect of this design. 1511 1512 // Deflate a single monitor if not in-use 1513 // Return true if deflated, false if in-use 1514 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, 1515 ObjectMonitor** freeHeadp, 1516 ObjectMonitor** freeTailp) { 1517 bool deflated; 1518 // Normal case ... The monitor is associated with obj. 1519 guarantee(obj->mark() == markOopDesc::encode(mid), "invariant"); 1520 guarantee(mid == obj->mark()->monitor(), "invariant"); 1521 guarantee(mid->header()->is_neutral(), "invariant"); 1522 1523 if (mid->is_busy()) { 1524 deflated = false; 1525 } else { 1526 // Deflate the monitor if it is no longer being used 1527 // It's idle - scavenge and return to the global free list 1528 // plain old deflation ... 1529 if (log_is_enabled(Trace, monitorinflation)) { 1530 ResourceMark rm; 1531 log_trace(monitorinflation)("deflate_monitor: " 1532 "object=" INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", type='%s'", 1533 p2i(obj), p2i(obj->mark()), 1534 obj->klass()->external_name()); 1535 } 1536 1537 // Restore the header back to obj 1538 obj->release_set_mark(mid->header()); 1539 mid->clear(); 1540 1541 assert(mid->object() == NULL, "invariant"); 1542 1543 // Move the object to the working free list defined by freeHeadp, freeTailp 1544 if (*freeHeadp == NULL) *freeHeadp = mid; 1545 if (*freeTailp != NULL) { 1546 ObjectMonitor * prevtail = *freeTailp; 1547 assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); 1548 prevtail->FreeNext = mid; 1549 } 1550 *freeTailp = mid; 1551 deflated = true; 1552 } 1553 return deflated; 1554 } 1555 1556 // Walk a given monitor list, and deflate idle monitors 1557 // The given list could be a per-thread list or a global list 1558 // Caller acquires gListLock as needed. 1559 // 1560 // In the case of parallel processing of thread local monitor lists, 1561 // work is done by Threads::parallel_threads_do() which ensures that 1562 // each Java thread is processed by exactly one worker thread, and 1563 // thus avoid conflicts that would arise when worker threads would 1564 // process the same monitor lists concurrently. 1565 // 1566 // See also ParallelSPCleanupTask and 1567 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and 1568 // Threads::parallel_java_threads_do() in thread.cpp. 1569 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp, 1570 ObjectMonitor** freeHeadp, 1571 ObjectMonitor** freeTailp) { 1572 ObjectMonitor* mid; 1573 ObjectMonitor* next; 1574 ObjectMonitor* cur_mid_in_use = NULL; 1575 int deflated_count = 0; 1576 1577 for (mid = *listHeadp; mid != NULL;) { 1578 oop obj = (oop) mid->object(); 1579 if (obj != NULL && deflate_monitor(mid, obj, freeHeadp, freeTailp)) { 1580 // if deflate_monitor succeeded, 1581 // extract from per-thread in-use list 1582 if (mid == *listHeadp) { 1583 *listHeadp = mid->FreeNext; 1584 } else if (cur_mid_in_use != NULL) { 1585 cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list 1586 } 1587 next = mid->FreeNext; 1588 mid->FreeNext = NULL; // This mid is current tail in the freeHeadp list 1589 mid = next; 1590 deflated_count++; 1591 } else { 1592 cur_mid_in_use = mid; 1593 mid = mid->FreeNext; 1594 } 1595 } 1596 return deflated_count; 1597 } 1598 1599 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { 1600 counters->nInuse = 0; // currently associated with objects 1601 counters->nInCirculation = 0; // extant 1602 counters->nScavenged = 0; // reclaimed (global and per-thread) 1603 counters->perThreadScavenged = 0; // per-thread scavenge total 1604 counters->perThreadTimes = 0.0; // per-thread scavenge times 1605 } 1606 1607 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { 1608 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1609 bool deflated = false; 1610 1611 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors 1612 ObjectMonitor * freeTailp = NULL; 1613 elapsedTimer timer; 1614 1615 if (log_is_enabled(Info, monitorinflation)) { 1616 timer.start(); 1617 } 1618 1619 // Prevent omFlush from changing mids in Thread dtor's during deflation 1620 // And in case the vm thread is acquiring a lock during a safepoint 1621 // See e.g. 6320749 1622 Thread::muxAcquire(&gListLock, "deflate_idle_monitors"); 1623 1624 // Note: the thread-local monitors lists get deflated in 1625 // a separate pass. See deflate_thread_local_monitors(). 1626 1627 // For moribund threads, scan gOmInUseList 1628 int deflated_count = 0; 1629 if (gOmInUseList) { 1630 counters->nInCirculation += gOmInUseCount; 1631 deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp); 1632 gOmInUseCount -= deflated_count; 1633 counters->nScavenged += deflated_count; 1634 counters->nInuse += gOmInUseCount; 1635 } 1636 1637 // Move the scavenged monitors back to the global free list. 1638 if (freeHeadp != NULL) { 1639 guarantee(freeTailp != NULL && counters->nScavenged > 0, "invariant"); 1640 assert(freeTailp->FreeNext == NULL, "invariant"); 1641 // constant-time list splice - prepend scavenged segment to gFreeList 1642 freeTailp->FreeNext = gFreeList; 1643 gFreeList = freeHeadp; 1644 } 1645 Thread::muxRelease(&gListLock); 1646 timer.stop(); 1647 1648 LogStreamHandle(Debug, monitorinflation) lsh_debug; 1649 LogStreamHandle(Info, monitorinflation) lsh_info; 1650 LogStream * ls = NULL; 1651 if (log_is_enabled(Debug, monitorinflation)) { 1652 ls = &lsh_debug; 1653 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 1654 ls = &lsh_info; 1655 } 1656 if (ls != NULL) { 1657 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 1658 } 1659 } 1660 1661 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { 1662 // Report the cumulative time for deflating each thread's idle 1663 // monitors. Note: if the work is split among more than one 1664 // worker thread, then the reported time will likely be more 1665 // than a beginning to end measurement of the phase. 1666 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->perThreadTimes, counters->perThreadScavenged); 1667 1668 LogStreamHandle(Debug, monitorinflation) lsh_debug; 1669 LogStreamHandle(Info, monitorinflation) lsh_info; 1670 LogStream * ls = NULL; 1671 if (log_is_enabled(Debug, monitorinflation)) { 1672 ls = &lsh_debug; 1673 } else if (counters->perThreadScavenged != 0 && log_is_enabled(Info, monitorinflation)) { 1674 ls = &lsh_info; 1675 } 1676 if (ls != NULL) { 1677 ls->print_cr("deflating per-thread idle monitors, %3.7f secs, %d monitors", counters->perThreadTimes, counters->perThreadScavenged); 1678 } 1679 1680 gMonitorFreeCount += counters->nScavenged; 1681 1682 if (log_is_enabled(Debug, monitorinflation)) { 1683 // exit_globals()'s call to audit_and_print_stats() is done 1684 // at the Info level. 1685 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); 1686 } 1687 1688 ForceMonitorScavenge = 0; // Reset 1689 1690 OM_PERFDATA_OP(Deflations, inc(counters->nScavenged)); 1691 OM_PERFDATA_OP(MonExtant, set_value(counters->nInCirculation)); 1692 1693 GVars.stwRandom = os::random(); 1694 GVars.stwCycle++; 1695 } 1696 1697 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { 1698 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 1699 1700 ObjectMonitor * freeHeadp = NULL; // Local SLL of scavenged monitors 1701 ObjectMonitor * freeTailp = NULL; 1702 elapsedTimer timer; 1703 1704 if (log_is_enabled(Info, safepoint, cleanup) || 1705 log_is_enabled(Info, monitorinflation)) { 1706 timer.start(); 1707 } 1708 1709 int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp); 1710 1711 timer.stop(); 1712 1713 Thread::muxAcquire(&gListLock, "deflate_thread_local_monitors"); 1714 1715 // Adjust counters 1716 counters->nInCirculation += thread->omInUseCount; 1717 thread->omInUseCount -= deflated_count; 1718 counters->nScavenged += deflated_count; 1719 counters->nInuse += thread->omInUseCount; 1720 counters->perThreadScavenged += deflated_count; 1721 // For now, we only care about cumulative per-thread deflation time. 1722 counters->perThreadTimes += timer.seconds(); 1723 1724 // Move the scavenged monitors back to the global free list. 1725 if (freeHeadp != NULL) { 1726 guarantee(freeTailp != NULL && deflated_count > 0, "invariant"); 1727 assert(freeTailp->FreeNext == NULL, "invariant"); 1728 1729 // constant-time list splice - prepend scavenged segment to gFreeList 1730 freeTailp->FreeNext = gFreeList; 1731 gFreeList = freeHeadp; 1732 } 1733 Thread::muxRelease(&gListLock); 1734 } 1735 1736 // Monitor cleanup on JavaThread::exit 1737 1738 // Iterate through monitor cache and attempt to release thread's monitors 1739 // Gives up on a particular monitor if an exception occurs, but continues 1740 // the overall iteration, swallowing the exception. 1741 class ReleaseJavaMonitorsClosure: public MonitorClosure { 1742 private: 1743 TRAPS; 1744 1745 public: 1746 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} 1747 void do_monitor(ObjectMonitor* mid) { 1748 if (mid->owner() == THREAD) { 1749 (void)mid->complete_exit(CHECK); 1750 } 1751 } 1752 }; 1753 1754 // Release all inflated monitors owned by THREAD. Lightweight monitors are 1755 // ignored. This is meant to be called during JNI thread detach which assumes 1756 // all remaining monitors are heavyweight. All exceptions are swallowed. 1757 // Scanning the extant monitor list can be time consuming. 1758 // A simple optimization is to add a per-thread flag that indicates a thread 1759 // called jni_monitorenter() during its lifetime. 1760 // 1761 // Instead of No_Savepoint_Verifier it might be cheaper to 1762 // use an idiom of the form: 1763 // auto int tmp = SafepointSynchronize::_safepoint_counter ; 1764 // <code that must not run at safepoint> 1765 // guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ; 1766 // Since the tests are extremely cheap we could leave them enabled 1767 // for normal product builds. 1768 1769 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) { 1770 assert(THREAD == JavaThread::current(), "must be current Java thread"); 1771 NoSafepointVerifier nsv; 1772 ReleaseJavaMonitorsClosure rjmc(THREAD); 1773 Thread::muxAcquire(&gListLock, "release_monitors_owned_by_thread"); 1774 ObjectSynchronizer::monitors_iterate(&rjmc); 1775 Thread::muxRelease(&gListLock); 1776 THREAD->clear_pending_exception(); 1777 } 1778 1779 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) { 1780 switch (cause) { 1781 case inflate_cause_vm_internal: return "VM Internal"; 1782 case inflate_cause_monitor_enter: return "Monitor Enter"; 1783 case inflate_cause_wait: return "Monitor Wait"; 1784 case inflate_cause_notify: return "Monitor Notify"; 1785 case inflate_cause_hash_code: return "Monitor Hash Code"; 1786 case inflate_cause_jni_enter: return "JNI Monitor Enter"; 1787 case inflate_cause_jni_exit: return "JNI Monitor Exit"; 1788 default: 1789 ShouldNotReachHere(); 1790 } 1791 return "Unknown"; 1792 } 1793 1794 //------------------------------------------------------------------------------ 1795 // Debugging code 1796 1797 u_char* ObjectSynchronizer::get_gvars_addr() { 1798 return (u_char*)&GVars; 1799 } 1800 1801 u_char* ObjectSynchronizer::get_gvars_hcSequence_addr() { 1802 return (u_char*)&GVars.hcSequence; 1803 } 1804 1805 size_t ObjectSynchronizer::get_gvars_size() { 1806 return sizeof(SharedGlobals); 1807 } 1808 1809 u_char* ObjectSynchronizer::get_gvars_stwRandom_addr() { 1810 return (u_char*)&GVars.stwRandom; 1811 } 1812 1813 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) { 1814 assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant"); 1815 1816 LogStreamHandle(Debug, monitorinflation) lsh_debug; 1817 LogStreamHandle(Info, monitorinflation) lsh_info; 1818 LogStreamHandle(Trace, monitorinflation) lsh_trace; 1819 LogStream * ls = NULL; 1820 if (log_is_enabled(Trace, monitorinflation)) { 1821 ls = &lsh_trace; 1822 } else if (log_is_enabled(Debug, monitorinflation)) { 1823 ls = &lsh_debug; 1824 } else if (log_is_enabled(Info, monitorinflation)) { 1825 ls = &lsh_info; 1826 } 1827 assert(ls != NULL, "sanity check"); 1828 1829 if (!on_exit) { 1830 // Not at VM exit so grab the global list lock. 1831 Thread::muxAcquire(&gListLock, "audit_and_print_stats"); 1832 } 1833 1834 // Log counts for the global and per-thread monitor lists: 1835 int chkMonitorPopulation = log_monitor_list_counts(ls); 1836 int error_cnt = 0; 1837 1838 ls->print_cr("Checking global lists:"); 1839 1840 // Check gMonitorPopulation: 1841 if (gMonitorPopulation == chkMonitorPopulation) { 1842 ls->print_cr("gMonitorPopulation=%d equals chkMonitorPopulation=%d", 1843 gMonitorPopulation, chkMonitorPopulation); 1844 } else { 1845 ls->print_cr("ERROR: gMonitorPopulation=%d is not equal to " 1846 "chkMonitorPopulation=%d", gMonitorPopulation, 1847 chkMonitorPopulation); 1848 error_cnt++; 1849 } 1850 1851 // Check gOmInUseList and gOmInUseCount: 1852 chk_global_in_use_list_and_count(ls, &error_cnt); 1853 1854 // Check gFreeList and gMonitorFreeCount: 1855 chk_global_free_list_and_count(ls, &error_cnt); 1856 1857 if (!on_exit) { 1858 Thread::muxRelease(&gListLock); 1859 } 1860 1861 ls->print_cr("Checking per-thread lists:"); 1862 1863 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 1864 // Check omInUseList and omInUseCount: 1865 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt); 1866 1867 // Check omFreeList and omFreeCount: 1868 chk_per_thread_free_list_and_count(jt, ls, &error_cnt); 1869 } 1870 1871 if (error_cnt == 0) { 1872 ls->print_cr("No errors found in monitor list checks."); 1873 } else { 1874 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt); 1875 } 1876 1877 if ((on_exit && log_is_enabled(Info, monitorinflation)) || 1878 (!on_exit && log_is_enabled(Trace, monitorinflation))) { 1879 // When exiting this log output is at the Info level. When called 1880 // at a safepoint, this log output is at the Trace level since 1881 // there can be a lot of it. 1882 log_in_use_monitor_details(ls, on_exit); 1883 } 1884 1885 ls->flush(); 1886 1887 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt); 1888 } 1889 1890 // Check a free monitor entry; log any errors. 1891 void ObjectSynchronizer::chk_free_entry(JavaThread * jt, ObjectMonitor * n, 1892 outputStream * out, int *error_cnt_p) { 1893 if (n->is_busy()) { 1894 if (jt != NULL) { 1895 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 1896 ": free per-thread monitor must not be busy.", p2i(jt), 1897 p2i(n)); 1898 } else { 1899 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " 1900 "must not be busy.", p2i(n)); 1901 } 1902 *error_cnt_p = *error_cnt_p + 1; 1903 } 1904 if (n->header() != NULL) { 1905 if (jt != NULL) { 1906 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 1907 ": free per-thread monitor must have NULL _header " 1908 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n), 1909 p2i(n->header())); 1910 } else { 1911 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " 1912 "must have NULL _header field: _header=" INTPTR_FORMAT, 1913 p2i(n), p2i(n->header())); 1914 } 1915 *error_cnt_p = *error_cnt_p + 1; 1916 } 1917 if (n->object() != NULL) { 1918 if (jt != NULL) { 1919 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 1920 ": free per-thread monitor must have NULL _object " 1921 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n), 1922 p2i(n->object())); 1923 } else { 1924 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " 1925 "must have NULL _object field: _object=" INTPTR_FORMAT, 1926 p2i(n), p2i(n->object())); 1927 } 1928 *error_cnt_p = *error_cnt_p + 1; 1929 } 1930 } 1931 1932 // Check the global free list and count; log the results of the checks. 1933 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out, 1934 int *error_cnt_p) { 1935 int chkMonitorFreeCount = 0; 1936 for (ObjectMonitor * n = gFreeList; n != NULL; n = n->FreeNext) { 1937 chk_free_entry(NULL /* jt */, n, out, error_cnt_p); 1938 chkMonitorFreeCount++; 1939 } 1940 if (gMonitorFreeCount == chkMonitorFreeCount) { 1941 out->print_cr("gMonitorFreeCount=%d equals chkMonitorFreeCount=%d", 1942 gMonitorFreeCount, chkMonitorFreeCount); 1943 } else { 1944 out->print_cr("ERROR: gMonitorFreeCount=%d is not equal to " 1945 "chkMonitorFreeCount=%d", gMonitorFreeCount, 1946 chkMonitorFreeCount); 1947 *error_cnt_p = *error_cnt_p + 1; 1948 } 1949 } 1950 1951 // Check the global in-use list and count; log the results of the checks. 1952 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out, 1953 int *error_cnt_p) { 1954 int chkOmInUseCount = 0; 1955 for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) { 1956 chk_in_use_entry(NULL /* jt */, n, out, error_cnt_p); 1957 chkOmInUseCount++; 1958 } 1959 if (gOmInUseCount == chkOmInUseCount) { 1960 out->print_cr("gOmInUseCount=%d equals chkOmInUseCount=%d", gOmInUseCount, 1961 chkOmInUseCount); 1962 } else { 1963 out->print_cr("ERROR: gOmInUseCount=%d is not equal to chkOmInUseCount=%d", 1964 gOmInUseCount, chkOmInUseCount); 1965 *error_cnt_p = *error_cnt_p + 1; 1966 } 1967 } 1968 1969 // Check an in-use monitor entry; log any errors. 1970 void ObjectSynchronizer::chk_in_use_entry(JavaThread * jt, ObjectMonitor * n, 1971 outputStream * out, int *error_cnt_p) { 1972 if (n->header() == NULL) { 1973 if (jt != NULL) { 1974 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 1975 ": in-use per-thread monitor must have non-NULL _header " 1976 "field.", p2i(jt), p2i(n)); 1977 } else { 1978 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor " 1979 "must have non-NULL _header field.", p2i(n)); 1980 } 1981 *error_cnt_p = *error_cnt_p + 1; 1982 } 1983 if (n->object() == NULL) { 1984 if (jt != NULL) { 1985 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 1986 ": in-use per-thread monitor must have non-NULL _object " 1987 "field.", p2i(jt), p2i(n)); 1988 } else { 1989 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor " 1990 "must have non-NULL _object field.", p2i(n)); 1991 } 1992 *error_cnt_p = *error_cnt_p + 1; 1993 } 1994 const oop obj = (oop)n->object(); 1995 const markOop mark = obj->mark(); 1996 if (!mark->has_monitor()) { 1997 if (jt != NULL) { 1998 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 1999 ": in-use per-thread monitor's object does not think " 2000 "it has a monitor: obj=" INTPTR_FORMAT ", mark=" 2001 INTPTR_FORMAT, p2i(jt), p2i(n), p2i((address)obj), 2002 p2i((address)mark)); 2003 } else { 2004 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " 2005 "monitor's object does not think it has a monitor: obj=" 2006 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), 2007 p2i((address)obj), p2i((address)mark)); 2008 } 2009 *error_cnt_p = *error_cnt_p + 1; 2010 } 2011 ObjectMonitor * const obj_mon = mark->monitor(); 2012 if (n != obj_mon) { 2013 if (jt != NULL) { 2014 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 2015 ": in-use per-thread monitor's object does not refer " 2016 "to the same monitor: obj=" INTPTR_FORMAT ", mark=" 2017 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt), 2018 p2i(n), p2i((address)obj), p2i((address)mark), 2019 p2i((address)obj_mon)); 2020 } else { 2021 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " 2022 "monitor's object does not refer to the same monitor: obj=" 2023 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon=" 2024 INTPTR_FORMAT, p2i(n), p2i((address)obj), 2025 p2i((address)mark), p2i((address)obj_mon)); 2026 } 2027 *error_cnt_p = *error_cnt_p + 1; 2028 } 2029 } 2030 2031 // Check the thread's free list and count; log the results of the checks. 2032 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt, 2033 outputStream * out, 2034 int *error_cnt_p) { 2035 int chkOmFreeCount = 0; 2036 for (ObjectMonitor * n = jt->omFreeList; n != NULL; n = n->FreeNext) { 2037 chk_free_entry(jt, n, out, error_cnt_p); 2038 chkOmFreeCount++; 2039 } 2040 if (jt->omFreeCount == chkOmFreeCount) { 2041 out->print_cr("jt=" INTPTR_FORMAT ": omFreeCount=%d equals " 2042 "chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, chkOmFreeCount); 2043 } else { 2044 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omFreeCount=%d is not " 2045 "equal to chkOmFreeCount=%d", p2i(jt), jt->omFreeCount, 2046 chkOmFreeCount); 2047 *error_cnt_p = *error_cnt_p + 1; 2048 } 2049 } 2050 2051 // Check the thread's in-use list and count; log the results of the checks. 2052 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt, 2053 outputStream * out, 2054 int *error_cnt_p) { 2055 int chkOmInUseCount = 0; 2056 for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) { 2057 chk_in_use_entry(jt, n, out, error_cnt_p); 2058 chkOmInUseCount++; 2059 } 2060 if (jt->omInUseCount == chkOmInUseCount) { 2061 out->print_cr("jt=" INTPTR_FORMAT ": omInUseCount=%d equals " 2062 "chkOmInUseCount=%d", p2i(jt), jt->omInUseCount, 2063 chkOmInUseCount); 2064 } else { 2065 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": omInUseCount=%d is not " 2066 "equal to chkOmInUseCount=%d", p2i(jt), jt->omInUseCount, 2067 chkOmInUseCount); 2068 *error_cnt_p = *error_cnt_p + 1; 2069 } 2070 } 2071 2072 // Log details about ObjectMonitors on the in-use lists. The 'BHL' 2073 // flags indicate why the entry is in-use, 'object' and 'object type' 2074 // indicate the associated object and its type. 2075 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out, 2076 bool on_exit) { 2077 if (!on_exit) { 2078 // Not at VM exit so grab the global list lock. 2079 Thread::muxAcquire(&gListLock, "log_in_use_monitor_details"); 2080 } 2081 2082 if (gOmInUseCount > 0) { 2083 out->print_cr("In-use global monitor info:"); 2084 out->print_cr("(B -> is_busy, H -> has hashcode, L -> lock status)"); 2085 out->print_cr("%18s %s %18s %18s", 2086 "monitor", "BHL", "object", "object type"); 2087 out->print_cr("================== === ================== =================="); 2088 for (ObjectMonitor * n = gOmInUseList; n != NULL; n = n->FreeNext) { 2089 const oop obj = (oop) n->object(); 2090 const markOop mark = n->header(); 2091 ResourceMark rm; 2092 out->print_cr(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(n), 2093 n->is_busy() != 0, mark->hash() != 0, n->owner() != NULL, 2094 p2i(obj), obj->klass()->external_name()); 2095 } 2096 } 2097 2098 if (!on_exit) { 2099 Thread::muxRelease(&gListLock); 2100 } 2101 2102 out->print_cr("In-use per-thread monitor info:"); 2103 out->print_cr("(B -> is_busy, H -> has hashcode, L -> lock status)"); 2104 out->print_cr("%18s %18s %s %18s %18s", 2105 "jt", "monitor", "BHL", "object", "object type"); 2106 out->print_cr("================== ================== === ================== =================="); 2107 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 2108 for (ObjectMonitor * n = jt->omInUseList; n != NULL; n = n->FreeNext) { 2109 const oop obj = (oop) n->object(); 2110 const markOop mark = n->header(); 2111 ResourceMark rm; 2112 out->print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT 2113 " %s", p2i(jt), p2i(n), n->is_busy() != 0, 2114 mark->hash() != 0, n->owner() != NULL, p2i(obj), 2115 obj->klass()->external_name()); 2116 } 2117 } 2118 2119 out->flush(); 2120 } 2121 2122 // Log counts for the global and per-thread monitor lists and return 2123 // the population count. 2124 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) { 2125 int popCount = 0; 2126 out->print_cr("%18s %10s %10s %10s", 2127 "Global Lists:", "InUse", "Free", "Total"); 2128 out->print_cr("================== ========== ========== =========="); 2129 out->print_cr("%18s %10d %10d %10d", "", 2130 gOmInUseCount, gMonitorFreeCount, gMonitorPopulation); 2131 popCount += gOmInUseCount + gMonitorFreeCount; 2132 2133 out->print_cr("%18s %10s %10s %10s", 2134 "Per-Thread Lists:", "InUse", "Free", "Provision"); 2135 out->print_cr("================== ========== ========== =========="); 2136 2137 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 2138 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt), 2139 jt->omInUseCount, jt->omFreeCount, jt->omFreeProvision); 2140 popCount += jt->omInUseCount + jt->omFreeCount; 2141 } 2142 return popCount; 2143 } 2144 2145 #ifndef PRODUCT 2146 2147 // Check if monitor belongs to the monitor cache 2148 // The list is grow-only so it's *relatively* safe to traverse 2149 // the list of extant blocks without taking a lock. 2150 2151 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { 2152 PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList); 2153 while (block != NULL) { 2154 assert(block->object() == CHAINMARKER, "must be a block header"); 2155 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { 2156 address mon = (address)monitor; 2157 address blk = (address)block; 2158 size_t diff = mon - blk; 2159 assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned"); 2160 return 1; 2161 } 2162 block = (PaddedEnd<ObjectMonitor> *)block->FreeNext; 2163 } 2164 return 0; 2165 } 2166 2167 #endif