/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;

struct ObjectMonitorListGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared list related variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.

  // Global ObjectMonitor free list. Newly allocated and deflated
  // ObjectMonitors are prepended here.
  ObjectMonitor* _free_list;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* _in_use_list;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  int _free_count;    // # on free_list
  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(int));

  int _in_use_count;  // # on in_use_list
  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));

  int _population;    // # Extant -- in circulation
  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));
};
static ObjectMonitorListGlobals om_list_globals;

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Spin-lock functions

// ObjectMonitors are not lockable outside of this file. We use spin-locks
// implemented using a bit in the _next_om field instead of the heavier
// weight locking mechanisms for faster list management.

#define OM_LOCK_BIT 0x1

// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
static bool is_locked(ObjectMonitor* om) {
  return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
}
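
// Illustrative sketch of the low-bit tagging idiom the functions below rely
// on (a hypothetical, standalone example, not part of this file's logic):
// because ObjectMonitors are cache-line aligned, the low bit of any
// ObjectMonitor* is always zero and can carry the lock state.
//
//   struct Node { Node* next; };
//   const intptr_t TAG = 0x1;
//   Node* tag(Node* p)       { return (Node*)((intptr_t)p | TAG);  }
//   Node* untag(Node* p)     { return (Node*)((intptr_t)p & ~TAG); }
//   bool  is_tagged(Node* p) { return ((intptr_t)p & TAG) != 0;    }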

// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}

// Return the unmarked next field in an ObjectMonitor. Note: the next
// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
}

// Try to lock an ObjectMonitor. Returns true if locking was successful.
// Otherwise returns false.
static bool try_om_lock(ObjectMonitor* om) {
  // Get current next field without any OM_LOCK_BIT value.
  ObjectMonitor* next = unmarked_next(om);
  if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
    return false;  // Cannot lock the ObjectMonitor.
  }
  return true;
}

// Lock an ObjectMonitor.
static void om_lock(ObjectMonitor* om) {
  while (true) {
    if (try_om_lock(om)) {
      return;
    }
  }
}

// Unlock an ObjectMonitor.
static void om_unlock(ObjectMonitor* om) {
  ObjectMonitor* next = om->next_om();
  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
  om->set_next_om(next);
}

// Get the list head after locking it. Returns the list head or NULL
// if the list is empty.
static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
  while (true) {
    ObjectMonitor* mid = Atomic::load(list_p);
    if (mid == NULL) {
      return NULL;  // The list is empty.
    }
    if (try_om_lock(mid)) {
      if (Atomic::load(list_p) != mid) {
        // The list head changed before we could lock it so we have to retry.
        om_unlock(mid);
        continue;
      }
      return mid;
    }
  }
}

#undef OM_LOCK_BIT


// =====================> List Management functions

// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p.
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                   int count, ObjectMonitor** list_p,
                                   int* count_p) {
  while (true) {
    ObjectMonitor* cur = Atomic::load(list_p);
    // Prepend list to *list_p.
    if (!try_om_lock(tail)) {
      // Failed to lock tail due to a list walker so try it all again.
      continue;
    }
    tail->set_next_om(cur);  // tail now points to cur (and unlocks tail)
    if (cur == NULL) {
      // No potential race with takers or other prependers since
      // *list_p is empty.
      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
        // Successfully switched *list_p to the list value.
        Atomic::add(count_p, count);
        break;
      }
      // Implied else: try it all again
    } else {
      if (!try_om_lock(cur)) {
        continue;  // failed to lock cur so try it all again
      }
      // We locked cur so try to switch *list_p to the list value.
      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
        // The list head has changed so unlock cur and try again:
        om_unlock(cur);
        continue;
      }
      Atomic::add(count_p, count);
      om_unlock(cur);
      break;
    }
  }
}
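
// For contrast, a minimal lock-free prepend (a Treiber-style push) looks
// like the sketch below (illustrative only; 'Node' is hypothetical). The
// version above additionally spin-locks 'tail' and the current head so
// that it coordinates with list walkers that rely on OM_LOCK_BIT:
//
//   void push(Node** head_p, Node* n) {
//     while (true) {
//       Node* h = Atomic::load(head_p);
//       n->next = h;
//       if (Atomic::cmpxchg(head_p, h, n) == h) {
//         return;
//       }
//     }
//   }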

// Prepend a newly allocated block of ObjectMonitors to g_block_list and
// om_list_globals._free_list. Also updates om_list_globals._population
// and om_list_globals._free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
  // First we handle g_block_list:
  while (true) {
    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
    // Prepend new_blk to g_block_list. The first ObjectMonitor in
    // a block is reserved for use as linkage to the next block.
    new_blk[0].set_next_om(cur);
    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
      // Successfully switched g_block_list to the new_blk value.
      Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
      break;
    }
    // Implied else: try it all again
  }

  // Second we handle om_list_globals._free_list:
  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
                         &om_list_globals._free_list, &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._free_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._free_count.
static void prepend_list_to_global_free_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
                         &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._in_use_count.
static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
                                               ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
                         &om_list_globals._in_use_count);
}

// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter.
static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
                              int* count_p) {
  while (true) {
    om_lock(m);  // Lock m so we can safely update its next field.
    ObjectMonitor* cur = NULL;
    // Lock the list head to guard against races with a list walker
    // thread:
    if ((cur = get_list_head_locked(list_p)) != NULL) {
      // List head is now locked so we can safely switch it.
      m->set_next_om(cur);       // m now points to cur (and unlocks m)
      Atomic::store(list_p, m);  // Switch list head to unlocked m.
      om_unlock(cur);
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
    m->set_next_om(cur);  // m now points to NULL (and unlocks m)
    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
      // List head is now unlocked m.
      break;
    }
    // Implied else: try it all again
  }
  Atomic::inc(count_p);
}

// Prepend an ObjectMonitor to a per-thread om_free_list.
// Also updates the per-thread om_free_count.
static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
}

// Prepend an ObjectMonitor to a per-thread om_in_use_list.
// Also updates the per-thread om_in_use_count.
static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}

// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
                                                int* count_p) {
  ObjectMonitor* take = NULL;
  // Lock the list head to guard against races with a list walker
  // thread:
  if ((take = get_list_head_locked(list_p)) == NULL) {
    return NULL;  // None are available.
  }
  ObjectMonitor* next = unmarked_next(take);
  // Switch locked list head to next (which unlocks the list head, but
  // leaves take locked):
  Atomic::store(list_p, next);
  Atomic::dec(count_p);
  // Unlock take, but leave the next value for any lagging list
  // walkers. It will get cleaned up when take is prepended to
  // the in-use list:
  om_unlock(take);
  return take;
}

// Take an ObjectMonitor from the start of the om_list_globals._free_list.
// Also updates om_list_globals._free_count. Returns NULL if none are
// available.
static ObjectMonitor* take_from_start_of_global_free_list() {
  return take_from_start_of_common(&om_list_globals._free_list,
                                   &om_list_globals._free_count);
}

// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
}


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.
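//
// Sketch of that fusion at the Java source level (illustrative; no such
// notifyAndExit() primitive exists today):
//
//   synchronized (someobj) {
//     ...
//     someobj.notify();   // immediately precedes the monitorexit
//   }                     // JIT could emit notifyAndExit(someobj) here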

bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE
  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    assert(m->object() == obj, "invariant");
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
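
// For reference, a summary of the 64-bit markWord encodings that the
// enter/exit paths below dispatch on (see markWord.hpp for the
// authoritative definition; this is a condensed restatement):
//
//   unused:25 | hash:31 | unused_gap:1 | age:4 | biased_lock:1 | lock:2
//
//   [ptr             | 00]  locked    ptr points to the displaced header
//                                     (a BasicLock) on the owner's stack
//   [header          | 01]  unlocked  regular object header (neutral)
//   [ptr             | 10]  monitor   inflated; ptr is the ObjectMonitor
//   [ptr             | 11]  marked    used by GC
//   [JavaThread* ... | 01]  biased    biased toward the given thread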

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  inflate(THREAD, obj(), inflate_cause_monitor_enter)->enter(THREAD);
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  inflate(THREAD, object, inflate_cause_vm_internal)->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  return monitor->complete_exit(THREAD);
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);

  monitor->reenter(recursions, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}
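
// Typical VM-internal usage of ObjectLocker (an illustrative sketch; the
// handle name is hypothetical):
//
//   {
//     ObjectLocker ol(h_obj, THREAD, true);  // constructor enters the monitor
//     ... operate on the locked object ...
//   }                                        // destructor exits the monitor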

// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  return dtrace_waited_probe(monitor, obj, THREAD);
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  inflate(THREAD, obj(), inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  inflate(THREAD, obj(), inflate_cause_notify)->notifyAll(THREAD);
}
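
// Java-level contract implemented above (illustrative):
//
//   synchronized (obj) {              // caller must own the monitor
//     while (!condition) {
//       obj.wait(timeout);            // releases and re-acquires the monitor
//     }
//   }
//
// Note the stack-locked early return in notify()/notifyall(): a
// stack-locked object has, by definition, an empty wait set, so the
// notification is a no-op and no inflation is needed.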

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock.
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}
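
// The default branch above is Marsaglia's xorshift128 generator (full
// period of 2^128 - 1 for any non-zero seed). A standalone form of the
// same update, for reference (illustrative sketch; 's' holds {x, y, z, w}):
//
//   static unsigned xorshift128(unsigned s[4]) {
//     unsigned t = s[0];
//     t ^= t << 11;
//     s[0] = s[1]; s[1] = s[2]; s[2] = s[3];
//     s[3] = (s[3] ^ (s[3] >> 19)) ^ (t ^ (t >> 8));
//     return s[3];
//   }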

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  ObjectMonitor* monitor = NULL;
  markWord temp, test;
  intptr_t hash;
  markWord mark = read_stable_mark(obj);

  // object should remain ineligible for biased locking
  assert(!mark.has_bias_pattern(), "invariant");

  if (mark.is_neutral()) {            // if this is a normal header
    hash = mark.hash();
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    hash = get_next_hash(self, obj);  // get a new hash
    temp = mark.copy_set_hash(hash);  // merge the hash into header
                                      // try to install the hash
    test = obj->cas_set_mark(temp, mark);
    if (test == mark) {               // if the hash was installed, return it
      return hash;
    }
    // Failed to install the hash. It could be that another thread
    // installed the hash just before our attempt or inflation has
    // occurred or... so we fall thru to inflate the monitor for
    // stability and then install the hash.
  } else if (mark.has_monitor()) {
    monitor = mark.monitor();
    temp = monitor->header();
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    // Fall thru so we only have one place that installs the hash in
    // the ObjectMonitor.
  } else if (self->is_lock_owned((address)mark.locker())) {
    // This is a stack lock owned by the calling thread so fetch the
    // displaced markWord from the BasicLock on the stack.
    temp = mark.displaced_mark_helper();
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    hash = temp.hash();
    if (hash != 0) {                  // if it has a hash, just return it
      return hash;
    }
    // WARNING:
    // The displaced header in the BasicLock on a thread's stack
    // is strictly immutable. It CANNOT be changed in ANY cases.
    // So we have to inflate the stack lock into an ObjectMonitor
    // even if the current thread owns the lock. The BasicLock on
    // a thread's stack can be asynchronously read by other threads
    // during an inflate() call so any change to that stack memory
    // may not propagate to other threads correctly.
  }

  // Inflate the monitor to set the hash.
  monitor = inflate(self, obj, inflate_cause_hash_code);
  // Load ObjectMonitor's header/dmw field and see if it has a hash.
  mark = monitor->header();
  assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
  hash = mark.hash();
  if (hash == 0) {                    // if it does not have a hash
    hash = get_next_hash(self, obj);  // get a new hash
    temp = mark.copy_set_hash(hash);  // merge the hash into header
    assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
    uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
    test = markWord(v);
    if (test != mark) {
      // The attempt to update the ObjectMonitor's header/dmw field
      // did not work. This can happen if another thread managed to
      // merge in the hash just before our cmpxchg().
      // If we add any new usages of the header/dmw field, this code
      // will need to be updated.
      hash = test.hash();
      assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
      assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
    }
  }
  // We finally get the hash.
  return hash;
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    return thread->is_lock_owned((address)mark.locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}
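
// The mark word dispatch used above (and in query_lock_ownership() below),
// summarized for reference:
//
//   mark.has_locker()  -> stack-locked; the owner is the thread whose stack
//                         contains the BasicLock the mark points to
//   mark.has_monitor() -> inflated; ObjectMonitor::_owner identifies the owner
//   otherwise          -> neutral; the object is unlocked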

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint.
  if (mark.has_monitor()) {
    void* owner = mark.monitor()->_owner;
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      oop object = (oop)mid->object();
      if (object != NULL) {
        // Only process with closure if the object is set.
        closure->do_monitor(mid);
      }
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
}

static bool monitors_used_above_threshold() {
  int population = Atomic::load(&om_list_globals._population);
  if (population == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = population - Atomic::load(&om_list_globals._free_count);
    int monitor_usage = (monitors_used * 100LL) / population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}

bool ObjectSynchronizer::is_cleanup_needed() {
  return monitors_used_above_threshold();
}
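
// Worked example of the threshold check above (illustrative numbers):
// with _population = 1000 and _free_count = 50, monitors_used = 950 and
// monitor_usage = 95. With MonitorUsedDeflationThreshold at its default
// of 90, 95 > 90, so a deflation cleanup is requested.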

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to lock ObjectMonitors for the list traversal.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from om_list_globals._free_list or a per-thread
// free list and associates them with objects. Deflation -- which occurs at
// STW-time -- disassociates idle monitors from objects.
// Such scavenged monitors are returned to the om_list_globals._free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the om_list_globals._free_list
// --   unassigned and on a per-thread free list
// --   assigned to an object. The object is inflated and the mark refers
//      to the ObjectMonitor.

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  NoSafepointVerifier nsv;

  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the
    // thread attempt to instantiate new monitors. Thread-local free lists
    // improve allocation latency, as well as reducing coherency traffic
    // on the shared global list.
    m = take_from_start_of_om_free_list(self);
    if (m != NULL) {
      guarantee(m->object() == NULL, "invariant");
      prepend_to_om_in_use_list(self, m);
      return m;
    }

    // 2: try to allocate from the global om_list_globals._free_list.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (Atomic::load(&om_list_globals._free_list) != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_global_free_list();
        if (take == NULL) {
          break;  // No more are available.
        }
        guarantee(take->object() == NULL, "invariant");
        take->Recycle();
        om_release(self, take, false);
      }
      self->om_free_provision += 1 + (self->om_free_provision / 2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
      continue;
    }

    // 3: allocate a block of new ObjectMonitors.
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
    PaddedObjectMonitor* temp;
    size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list; each monitor points to its next,
    // forming the singly linked free list. The very first monitor
    // will point to the next block, which forms the block list.
    // The trick of using the 1st element in the block as g_block_list
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    prepend_block_to_lists(temp);
  }
}
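
// Layout of a freshly formatted block, as produced above (illustrative):
//
//   temp[0]               object() == CHAINMARKER; next_om is the
//                         g_block_list linkage to the previous block
//   temp[1]               next_om --> temp[2]
//   ...                   ...
//   temp[_BLOCKSIZE-1]    next_om --> NULL (tail of the new free chunk)
//
// temp[1] .. temp[_BLOCKSIZE-1] are then prepended to the global free list.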

// Place "m" on the caller's private per-thread om_free_list.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's om_free_list as the only non-allocation time
// we'll call om_release() is to return a monitor to the free list after
// a CAS attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() -- from reclaiming them while we
// are trying to release them.

void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                    bool from_per_thread_alloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  NoSafepointVerifier nsv;

  if ((m->is_busy() | m->_recursions) != 0) {
    stringStream ss;
    fatal("freeing in-use monitor: %s, recursions=" INTX_FORMAT,
          m->is_busy_to_string(&ss), m->_recursions);
  }
  // _next_om is used for both per-thread in-use and free lists so
  // we have to remove 'm' from the in-use list first (as needed).
  if (from_per_thread_alloc) {
    // Need to remove 'm' from om_in_use_list.
    ObjectMonitor* mid = NULL;
    ObjectMonitor* next = NULL;

    // This list walk can only race with another list walker since
    // deflation can only happen at a safepoint so we don't have to
    // worry about an ObjectMonitor being removed from this list
    // while we are walking it.

    // Lock the list head to avoid racing with another list walker.
    if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
      fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
    }
    next = unmarked_next(mid);
    if (m == mid) {
      // First special case:
      // 'm' matches mid, is the list head and is locked. Switch the list
      // head to next which unlocks the list head, but leaves the extracted
      // mid locked:
      Atomic::store(&self->om_in_use_list, next);
    } else if (m == next) {
      // Second special case:
      // 'm' matches next after the list head and we already have the list
      // head locked so set mid to what we are extracting:
      mid = next;
      // Lock mid to prevent races with a list walker:
      om_lock(mid);
      // Update next to what follows mid (if anything):
      next = unmarked_next(mid);
      // Switch next after the list head to new next which unlocks the
      // list head, but leaves the extracted mid locked:
      self->om_in_use_list->set_next_om(next);
    } else {
      // We have to search the list to find 'm'.
      om_unlock(mid);  // unlock the list head
      guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
                " is too short.", p2i(self), p2i(self->om_in_use_list));
      // Our starting anchor is next after the list head which is the
      // last ObjectMonitor we checked:
      ObjectMonitor* anchor = next;
      while ((mid = unmarked_next(anchor)) != NULL) {
        if (m == mid) {
          // We found 'm' on the per-thread in-use list so extract it.
          om_lock(anchor);  // Lock the anchor so we can safely modify it.
          // Update next to what follows mid (if anything):
          next = unmarked_next(mid);
          // Switch next after the anchor to new next which unlocks the
          // anchor, but leaves the extracted mid locked:
          anchor->set_next_om(next);
          break;
        } else {
          anchor = mid;
        }
      }
    }

    if (mid == NULL) {
      // Reached end of the list and didn't find 'm' so:
      fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT "on om_in_use_list="
            INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
    }

    // At this point mid is disconnected from the in-use list so
    // its lock no longer has any effects on the in-use list.
    Atomic::dec(&self->om_in_use_count);
    // Unlock mid, but leave the next value for any lagging list
    // walkers. It will get cleaned up when mid is prepended to
    // the thread's free list:
    om_unlock(mid);
  }

  prepend_to_om_free_list(self, m);
}
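
// The three extraction cases handled above, sketched (illustrative;
// [x] marks a node whose _next_om lock is held while relinking):
//
//   case 1: [m] --> next ...                     m is the (locked) list head
//   case 2: [head] --> [m] --> next ...          m follows the list head
//   case 3: ... --> [anchor] --> [m] --> ...     found by searching the list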
// particular, this ensures that the thread's in-use monitors are
// scanned by a GC safepoint, either via Thread::oops_do() (before
// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
// om_flush() is called).

void ObjectSynchronizer::om_flush(Thread* self) {
  // Process the per-thread in-use list first to be consistent.
  int in_use_count = 0;
  ObjectMonitor* in_use_list = NULL;
  ObjectMonitor* in_use_tail = NULL;
  NoSafepointVerifier nsv;

  // This function can race with a list walker thread so we lock the
  // list head to prevent confusion.
  if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
    // At this point, we have locked the in-use list head so a racing
    // thread cannot come in after us. However, a racing thread could
    // be ahead of us; we'll detect that and delay to let it finish.
    //
    // The thread is going away; however, the ObjectMonitors on the
    // om_in_use_list may still be in-use by other threads. Link
    // them to in_use_tail, which will be linked into the global
    // in-use list (om_list_globals._in_use_list) below.
    //
    // Account for the in-use list head before the loop since it is
    // already locked (by this thread):
    in_use_tail = in_use_list;
    in_use_count++;
    for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL; cur_om = unmarked_next(cur_om)) {
      if (is_locked(cur_om)) {
        // cur_om is locked so there must be a racing walker thread ahead
        // of us so we'll give it a chance to finish.
        while (is_locked(cur_om)) {
          os::naked_short_sleep(1);
        }
      }
      in_use_tail = cur_om;
      in_use_count++;
    }
    guarantee(in_use_tail != NULL, "invariant");
    int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
    assert(l_om_in_use_count == in_use_count, "in-use counts don't match: "
           "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
    Atomic::store(&self->om_in_use_count, 0);
    // Clear the in-use list head (which also unlocks it):
    Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
    om_unlock(in_use_list);
  }

  int free_count = 0;
  ObjectMonitor* free_list = NULL;
  ObjectMonitor* free_tail = NULL;
  // This function can race with a list walker thread so we lock the
  // list head to prevent confusion.
  if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
    // At this point, we have locked the free list head so a racing
    // thread cannot come in after us. However, a racing thread could
    // be ahead of us; we'll detect that and delay to let it finish.
    //
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to om_list_globals._free_list below.
    //
    // Account for the free list head before the loop since it is
    // already locked (by this thread):
    free_tail = free_list;
    free_count++;
    for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
      if (is_locked(s)) {
        // s is locked so there must be a racing walker thread ahead
        // of us so we'll give it a chance to finish.
        while (is_locked(s)) {
          os::naked_short_sleep(1);
        }
      }
      free_tail = s;
      free_count++;
      guarantee(s->object() == NULL, "invariant");
      if (s->is_busy()) {
        stringStream ss;
        fatal("must be !is_busy: %s", s->is_busy_to_string(&ss));
      }
    }
    guarantee(free_tail != NULL, "invariant");
    int l_om_free_count = Atomic::load(&self->om_free_count);
    assert(l_om_free_count == free_count, "free counts don't match: "
           "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
    Atomic::store(&self->om_free_count, 0);
    Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
    om_unlock(free_list);
  }

  if (free_tail != NULL) {
    prepend_list_to_global_free_list(free_list, free_tail, free_count);
  }

  if (in_use_tail != NULL) {
    prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
  }

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
                 ", in_use_count=%d" ", om_free_provision=%d",
                 p2i(self), free_count, in_use_count, self->om_free_provision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark();
  if (mark.has_monitor()) {
    assert(ObjectSynchronizer::verify_objmon_isinpool(mark.monitor()), "monitor is invalid");
    assert(mark.monitor()->header().is_neutral(), "monitor must record a good object header");
    return;
  }
  inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

ObjectMonitor* ObjectSynchronizer::inflate(Thread* self,
                                           oop object, const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // * Inflated     - just return
    // * Stack-locked - coerce it to inflated
    // * INFLATING    - busy wait for conversion to complete
    // * Neutral      - aggressively inflate the object.
    // * BIASED       - Illegal. We should never see this.
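    //
    // For reference, a summary of the markWord encoding that produces
    // these states (see markWord.hpp for the authoritative layout):
    //
    //   [ptr             | 00]  stack-locked (ptr points to the BasicLock
    //                           on the owner's stack)
    //   [header          | 01]  neutral (regular, unlocked object header)
    //   [ptr             | 10]  inflated (ptr points to the ObjectMonitor)
    //   [ptr             | 11]  marked, used by the GC
    //   [0 ...           | 00]  INFLATING - the transient all-zero value
    //   [JavaThread* ... 1 01]  biased toward the given thread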

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markWord::INFLATING()) {
      read_stable_mark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the om_alloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in om_alloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
      ObjectMonitor* m = om_alloc(self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        om_release(self, m, true);
        continue;   // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack BasicLock back into the object header. Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an ObjectMonitor. The inflate() routine must copy the header
      // value from the BasicLock on the owner's stack to the ObjectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the BasicLock to the object)
      // while inflation is in-progress. This protocol avoids races that would
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.
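      //
      // An illustrative interleaving (not from the sources) with T2
      // holding the stack-lock and T1 inflating:
      //
      //   T1 (inflating)                      T2 (owner, unlocking)
      //   --------------                      ---------------------
      //   CAS(mark: BasicLock -> 0) succeeds
      //                                       CAS(mark: BasicLock -> dmw)
      //                                       fails (mark is 0), so T2 takes
      //                                       the slow_exit() path and spins
      //   dmw = mark.displaced_mark_helper()  (stable while mark == 0)
      //   m->set_header(dmw); ...
      //   release_set_mark(encode(m))
      //                                       T2 observes the new mark and
      //                                       exits via the ObjectMonitor.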

      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
      // with this thread we could simply set m->_owner = self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      m->set_owner_from(NULL, mark.locker());
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      object->release_set_mark(markWord::encode(m));

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to self.
    // An inflateTry() method that we could call from enter() would be useful.
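    //
    // A hypothetical sketch of that one-step scheme; inflateTry() and its
    // exact unwind sequence are illustrative only, not part of the sources:
    //
    //   ObjectMonitor* inflate_try(Thread* self, oop object, markWord mark) {
    //     ObjectMonitor* m = om_alloc(self);
    //     m->Recycle();
    //     m->set_header(mark);              // requires a neutral mark
    //     m->set_object(object);
    //     m->set_owner_from(NULL, self);    // pre-lock for the caller
    //     if (object->cas_set_mark(markWord::encode(m), mark) == mark) {
    //       return m;                       // inflated *and* owned in one CAS
    //     }
    //     // Interference: undo the speculative setup and let the caller retry.
    //     m->set_owner_from(self, NULL);
    //     m->set_object(NULL);
    //     m->set_header(markWord::zero());
    //     om_release(self, m, true);
    //     return NULL;
    //   }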

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = om_alloc(self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    m->set_object(object);
    m->_Responsible = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // consider: keep metastats by type/class

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
      om_release(self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}


// We maintain a list of in-use monitors for each thread.
//
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see om_flush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in-use could negatively impact performance. We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** free_head_p,
                                         ObjectMonitor** free_tail_p) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markWord mark = obj->mark();
  guarantee(mark == markWord::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
            markWord::encode(mid).value());
  // Make sure that mark.monitor() and markWord::encode() agree:
  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  const markWord dmw = mid->header();
  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

  if (mid->is_busy()) {
    // Easy checks are first - the ObjectMonitor is busy so no deflation.
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  mark.value(), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));

    // Move the deflated ObjectMonitor to the working free list
    // defined by free_head_p and free_tail_p.
    if (*free_head_p == NULL) *free_head_p = mid;
    if (*free_tail_p != NULL) {
      // We append to the list so the caller can use mid->_next_om
      // to fix the linkages in its context.
      ObjectMonitor* prevtail = *free_tail_p;
      // Should have been cleaned up by the caller:
      // Note: Should not have to lock prevtail here since we're at a
      // safepoint and ObjectMonitors on the local free list should
      // not be accessed in parallel.
#ifdef ASSERT
      ObjectMonitor* l_next_om = prevtail->next_om();
#endif
      assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
      prevtail->set_next_om(mid);
    }
    *free_tail_p = mid;
    // At this point, mid->_next_om still refers to its current
    // value and another ObjectMonitor's _next_om field still
    // refers to this ObjectMonitor. Those linkages have to be
    // cleaned up by the caller who has the complete context.
    deflated = true;
  }
  return deflated;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
//
// In the case of parallel processing of thread local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise if worker threads processed
// the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
                                             int* count_p,
                                             ObjectMonitor** free_head_p,
                                             ObjectMonitor** free_tail_p) {
  ObjectMonitor* cur_mid_in_use = NULL;
  ObjectMonitor* mid = NULL;
  ObjectMonitor* next = NULL;
  int deflated_count = 0;

  // This list walk executes at a safepoint and does not race with any
  // other list walkers.
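  //
  // An illustrative view of one unlink step in the loop below, where
  // 'mid' is being deflated and 'cur_mid_in_use' is the last monitor
  // that stayed in use:
  //
  //   before:  cur_mid_in_use -> mid -> next -> ...
  //   after:   cur_mid_in_use -> next -> ...    (and mid -> NULL)
  //
  // When mid is still the list head, *list_p itself is switched to next
  // instead.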

  for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
    next = unmarked_next(mid);
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (cur_mid_in_use == NULL) {
        // mid is the list head so switch the list head to next:
        Atomic::store(list_p, next);
      } else {
        // Switch cur_mid_in_use's next field to next:
        cur_mid_in_use->set_next_om(next);
      }
      // At this point mid is disconnected from the in-use list.
      deflated_count++;
      Atomic::dec(count_p);
      // mid is current tail in the free_head_p list so NULL terminate it:
      mid->set_next_om(NULL);
    } else {
      cur_mid_in_use = mid;
    }
  }
  return deflated_count;
}

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->n_in_use = 0;              // currently associated with objects
  counters->n_in_circulation = 0;      // extant
  counters->n_scavenged = 0;           // reclaimed (global and per-thread)
  counters->per_thread_scavenged = 0;  // per-thread scavenge total
  counters->per_thread_times = 0.0;    // per-thread scavenge times
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  bool deflated = false;

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Note: the thread-local monitor lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan om_list_globals._in_use_list
  int deflated_count = 0;
  if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
    // Update n_in_circulation before om_list_globals._in_use_count is
    // updated by deflation.
    Atomic::add(&counters->n_in_circulation,
                Atomic::load(&om_list_globals._in_use_count));

    deflated_count = deflate_monitor_list(&om_list_globals._in_use_list,
                                          &om_list_globals._in_use_count,
                                          &free_head_p, &free_tail_p);
    Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count));
  }

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
#ifdef ASSERT
    ObjectMonitor* l_next_om = free_tail_p->next_om();
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
  }
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning-to-end measurement of the phase.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level and not at a safepoint.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
                               "global_free_count=%d",
                               Atomic::load(&om_list_globals._population),
                               Atomic::load(&om_list_globals._in_use_count),
                               Atomic::load(&om_list_globals._free_count));
  }

  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));

  GVars.stw_random = os::random();
  GVars.stw_cycle++;
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Update n_in_circulation before om_in_use_count is updated by deflation.
  Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));

  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
  Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
#ifdef ASSERT
    ObjectMonitor* l_next_om = free_tail_p->next_om();
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
    Atomic::add(&counters->per_thread_scavenged, deflated_count);
  }

  timer.stop();
  counters->per_thread_times += timer.seconds();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
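//
// An explanatory note on why that guarantee would work, assuming the
// usual _safepoint_counter protocol (the counter is incremented both
// when a safepoint begins and when it ends, so it is odd exactly while
// a safepoint is in progress): (tmp ^ _safepoint_counter) is non-zero
// if the counter moved, i.e. a safepoint began or ended inside the
// region, and (tmp & 1) is non-zero if the region itself started inside
// a safepoint. The whole expression is 0 only when neither happened.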

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  ObjectSynchronizer::monitors_iterate(&rjmc);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal: return "VM Internal";
    case inflate_cause_monitor_enter: return "Monitor Enter";
    case inflate_cause_wait: return "Monitor Wait";
    case inflate_cause_notify: return "Monitor Notify";
    case inflate_cause_hash_code: return "Monitor Hash Code";
    case inflate_cause_jni_enter: return "JNI Monitor Enter";
    case inflate_cause_jni_exit: return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// This function can be called at a safepoint or it can be called when
// we are trying to exit the VM. When we are trying to exit the VM, the
// list walker functions can run in parallel with the other list
// operations so spin-locking is used for safety.
//
// Calls to this function can be added in various places as a debugging
// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
// details logged at the Info level and 'false' for the 'on_exit'
// parameter to have in-use monitor details logged at the Trace level.
// deflate_monitor_list() no longer uses spin-locking so be careful
// when adding audit_and_print_stats() calls at a safepoint.
//
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check om_list_globals._population:
  if (Atomic::load(&om_list_globals._population) == chk_om_population) {
    ls->print_cr("global_population=%d equals chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // log_monitor_list_counts() to return a value that doesn't match
    // om_list_globals._population.
    // So far a higher value has been seen in testing so something is
    // being double counted by log_monitor_list_counts().
    ls->print_cr("WARNING: global_population=%d is not equal to "
                 "chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  }

  // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check om_list_globals._free_list and om_list_globals._free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}
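
// The traversal helper below uses hand-over-hand locking (sometimes
// called "lock coupling"): the next node is locked before the current
// node is unlocked, so at every instant the walker holds a lock at or
// ahead of its position and a concurrent list operation cannot slip
// past it and unlink the node it is about to visit.
//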
// Lock the next ObjectMonitor for traversal and unlock the current
// ObjectMonitor. Returns the next ObjectMonitor if there is one.
// Otherwise returns NULL (after unlocking the current ObjectMonitor).
// This function is used by the various list walker functions to
// safely walk a list without allowing an ObjectMonitor to be moved
// to another list in the middle of a walk.
static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
  assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
  ObjectMonitor* next = unmarked_next(cur);
  if (next == NULL) {  // Reached the end of the list.
    om_unlock(cur);
    return NULL;
  }
  om_lock(next);   // Lock next before unlocking current to keep
  om_unlock(cur);  // from being by-passed by another thread.
  return next;
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._free_list)) != NULL) {
    // Marked the global free list head so process the list.
    while (true) {
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  if (l_free_count == chk_om_free_count) {
    out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
                  l_free_count, chk_om_free_count);
  } else {
    // With fine-grained locks on om_list_globals._free_list, it
    // is possible for an ObjectMonitor to be prepended to
    // om_list_globals._free_list after we started calculating
    // chk_om_free_count so om_list_globals._free_count may not
    // match anymore.
    out->print_cr("WARNING: global_free_count=%d is not equal to "
                  "chk_om_free_count=%d", l_free_count, chk_om_free_count);
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
    // Marked the global in-use list head so process the list.
    while (true) {
      chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  if (l_in_use_count == chk_om_in_use_count) {
    out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // an exiting JavaThread to put its in-use ObjectMonitors on the
    // global in-use list after chk_om_in_use_count is calculated above.
    out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
    // Without an associated object, the object checks below would fault
    // on a NULL oop so we stop here:
    return;
  }
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
    // mark.monitor() below is only valid when the mark encodes a
    // monitor so we stop here:
    return;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
    // Marked the per-thread free list head so process the list.
    while (true) {
      chk_free_entry(jt, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_free_count = Atomic::load(&jt->om_free_count);
  if (l_om_free_count == chk_om_free_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
                  "chk_om_free_count=%d", p2i(jt), l_om_free_count, chk_om_free_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
                  "equal to chk_om_free_count=%d", p2i(jt), l_om_free_count,
                  chk_om_free_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
    // Marked the per-thread in-use list head so process the list.
    while (true) {
      chk_in_use_entry(jt, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
  if (l_om_in_use_count == chk_om_in_use_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                  "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                  "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
  stringStream ss;
  if (Atomic::load(&om_list_globals._in_use_count) > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
      // Marked the global in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(cur),
                   cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
                   p2i(obj), obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %18s %18s",
                "jt", "monitor", "BHL", "object", "object type");
  out->print_cr("================== ================== === ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
      // Marked the per-thread in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
                   " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
                   mark.hash() != 0, cur->owner() != NULL, p2i(obj),
                   obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int pop_count = 0;
  out->print_cr("%18s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Total");
  out->print_cr("================== ========== ========== ==========");
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  out->print_cr("%18s %10d %10d %10d", "", l_in_use_count,
                l_free_count, Atomic::load(&om_list_globals._population));
  pop_count += l_in_use_count + l_free_count;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
    int l_om_free_count = Atomic::load(&jt->om_free_count);
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  l_om_in_use_count, l_om_free_count, jt->om_free_provision);
    pop_count += l_om_in_use_count + l_om_free_count;
  }
  return pop_count;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
  return 0;
}

#endif