/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

// Keep the parameter order of the no-op stubs in sync with the enabled
// versions above.
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;
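// The ObjectMonitorListGlobals struct below keeps each hot field alone on
// its cache line. As an aside for readers, the effect of that padding can
// be illustrated with a minimal, self-contained sketch (hypothetical names,
// not used anywhere in this file; kept out of the build with #if 0):
#if 0
// Hypothetical stand-in: CACHE_LINE models OM_CACHE_LINE_SIZE.
constexpr int CACHE_LINE = 64;

struct PaddedCounters {
  char _pad_prefix[CACHE_LINE];            // keep preceding data off our line
  volatile int _counter_a;                 // sole occupant of its cache line
  char _pad_a[CACHE_LINE - sizeof(int)];
  volatile int _counter_b;                 // written by other threads
  char _pad_b[CACHE_LINE - sizeof(int)];
};
// Without the padding, _counter_a and _counter_b would typically share a
// cache line; writes by different threads would then ping-pong ("false
// share") the line even though the fields are logically independent.
#endif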
struct ObjectMonitorListGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared list related variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.

  // Global ObjectMonitor free list. Newly allocated and deflated
  // ObjectMonitors are prepended here.
  ObjectMonitor* _free_list;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* _in_use_list;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor wait list. Deflated ObjectMonitors wait on
  // this list until after a handshake or a safepoint for platforms
  // that don't support handshakes. After the handshake or safepoint,
  // the deflated ObjectMonitors are prepended to free_list.
  ObjectMonitor* _wait_list;
  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  int _free_count;    // # on free_list
  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));

  int _in_use_count;  // # on in_use_list
  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));

  int _population;    // # Extant -- in circulation
  DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));

  int _wait_count;    // # on wait_list
  DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
};
static ObjectMonitorListGlobals om_list_globals;

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Spin-lock functions

// ObjectMonitors are not lockable outside of this file. We use spin-locks
// implemented using a bit in the _next_om field instead of the heavier
// weight locking mechanisms for faster list management.

#define OM_LOCK_BIT 0x1

// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
static bool is_locked(ObjectMonitor* om) {
  return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
}

// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}

// Return the unmarked next field in an ObjectMonitor. Note: the next
// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
}

// Try to lock an ObjectMonitor. Returns true if locking was successful.
// Otherwise returns false.
static bool try_om_lock(ObjectMonitor* om) {
  // Get current next field without any OM_LOCK_BIT value.
  ObjectMonitor* next = unmarked_next(om);
  if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
    return false;  // Cannot lock the ObjectMonitor.
  }
  return true;
}

// Lock an ObjectMonitor.
static void om_lock(ObjectMonitor* om) {
  while (true) {
    if (try_om_lock(om)) {
      return;
    }
  }
}

// Unlock an ObjectMonitor.
static void om_unlock(ObjectMonitor* om) {
  ObjectMonitor* next = om->next_om();
  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
  om->set_next_om(next);
}

// Get the list head after locking it. Returns the list head or NULL
// if the list is empty.
static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
  while (true) {
    ObjectMonitor* mid = Atomic::load(list_p);
    if (mid == NULL) {
      return NULL;  // The list is empty.
    }
    if (try_om_lock(mid)) {
      if (Atomic::load(list_p) != mid) {
        // The list head changed before we could lock it so we have to retry.
        om_unlock(mid);
        continue;
      }
      return mid;
    }
  }
}

#undef OM_LOCK_BIT
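// For readers unfamiliar with the pointer-bit spin-lock used above: because
// ObjectMonitors are at least pointer-aligned, the low bit of a _next_om
// pointer is always zero and can double as a lock bit. A minimal,
// self-contained model of the same idea (hypothetical Node type, standard
// C++ atomics instead of HotSpot's Atomic class; illustrative only, kept
// out of the build with #if 0):
#if 0
#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<std::uintptr_t> next{0};  // low bit doubles as a spin-lock

  bool try_lock() {
    std::uintptr_t unmarked = next.load() & ~std::uintptr_t(1);
    // Succeeds only if the lock bit was clear and next did not change.
    return next.compare_exchange_strong(unmarked, unmarked | 1);
  }
  void unlock() {
    next.fetch_and(~std::uintptr_t(1));  // clear the lock bit
  }
};
#endif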
// =====================> List Management functions

// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p.
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                   int count, ObjectMonitor** list_p,
                                   int* count_p) {
  while (true) {
    ObjectMonitor* cur = Atomic::load(list_p);
    // Prepend list to *list_p.
    if (!try_om_lock(tail)) {
      // Failed to lock tail due to a list walker so try it all again.
      continue;
    }
    tail->set_next_om(cur);  // tail now points to cur (and unlocks tail)
    if (cur == NULL) {
      // No potential race with takers or other prependers since
      // *list_p is empty.
      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
        // Successfully switched *list_p to the 'list' value.
        Atomic::add(count_p, count);
        break;
      }
      // Implied else: try it all again
    } else {
      if (!try_om_lock(cur)) {
        continue;  // failed to lock cur so try it all again
      }
      // We locked cur so try to switch *list_p to the 'list' value.
      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
        // The list head has changed so unlock cur and try again:
        om_unlock(cur);
        continue;
      }
      Atomic::add(count_p, count);
      om_unlock(cur);
      break;
    }
  }
}

// Prepend a newly allocated block of ObjectMonitors to g_block_list and
// om_list_globals._free_list. Also updates om_list_globals._population
// and om_list_globals._free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
  // First we handle g_block_list:
  while (true) {
    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
    // Prepend new_blk to g_block_list. The first ObjectMonitor in
    // a block is reserved for use as linkage to the next block.
    new_blk[0].set_next_om(cur);
    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
      // Successfully switched g_block_list to the new_blk value.
      Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
      break;
    }
    // Implied else: try it all again
  }

  // Second we handle om_list_globals._free_list:
  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
                         &om_list_globals._free_list, &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._free_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._free_count.
static void prepend_list_to_global_free_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
                         &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._wait_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._wait_count.
static void prepend_list_to_global_wait_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
                         &om_list_globals._wait_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._in_use_count.
static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
                                               ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
                         &om_list_globals._in_use_count);
}
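// The retry loop in prepend_list_to_common() is, at its core, the classic
// CAS-based "push onto a singly linked list" pattern, extended with the
// OM_LOCK_BIT handshake so list walkers are not broken mid-splice. The
// unadorned pattern, for comparison (hypothetical Node type, standard C++
// atomics; a sketch only, excluded from the build with #if 0):
#if 0
#include <atomic>

struct Node { Node* next; };

static std::atomic<Node*> list_head{nullptr};

// Prepend the chain [first .. last] to list_head.
void prepend_chain(Node* first, Node* last) {
  Node* cur = list_head.load();
  do {
    last->next = cur;  // chain's tail points at the current head
    // Retry if another thread changed the head in the meantime.
  } while (!list_head.compare_exchange_weak(cur, first));
}
#endif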
// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter.
static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
                              int* count_p) {
  while (true) {
    om_lock(m);  // Lock m so we can safely update its next field.
    ObjectMonitor* cur = NULL;
    // Lock the list head to guard against races with a list walker
    // or async deflater thread (which only races in om_in_use_list):
    if ((cur = get_list_head_locked(list_p)) != NULL) {
      // List head is now locked so we can safely switch it.
      m->set_next_om(cur);  // m now points to cur (and unlocks m)
      Atomic::store(list_p, m);  // Switch list head to unlocked m.
      om_unlock(cur);
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
    m->set_next_om(cur);  // m now points to NULL (and unlocks m)
    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
      // List head is now unlocked m.
      break;
    }
    // Implied else: try it all again
  }
  Atomic::inc(count_p);
}

// Prepend an ObjectMonitor to a per-thread om_free_list.
// Also updates the per-thread om_free_count.
static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
}

// Prepend an ObjectMonitor to a per-thread om_in_use_list.
// Also updates the per-thread om_in_use_count.
static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}

// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
                                                int* count_p) {
  ObjectMonitor* take = NULL;
  // Lock the list head to guard against races with a list walker
  // or async deflater thread (which only races in om_list_globals._free_list):
  if ((take = get_list_head_locked(list_p)) == NULL) {
    return NULL;  // None are available.
  }
  ObjectMonitor* next = unmarked_next(take);
  // Switch locked list head to next (which unlocks the list head, but
  // leaves take locked):
  Atomic::store(list_p, next);
  Atomic::dec(count_p);
  // Unlock take, but leave the next value for any lagging list
  // walkers. It will get cleaned up when take is prepended to
  // the in-use list:
  om_unlock(take);
  return take;
}

// Take an ObjectMonitor from the start of the om_list_globals._free_list.
// Also updates om_list_globals._free_count. Returns NULL if none are
// available.
static ObjectMonitor* take_from_start_of_global_free_list() {
  return take_from_start_of_common(&om_list_globals._free_list,
                                   &om_list_globals._free_count);
}

// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
}
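// take_from_start_of_common() is the "pop" counterpart of the prepend
// paths above: lock the head so it cannot move, swing the list pointer to
// the second element, then unlock. A stripped-down model of the same shape
// (hypothetical helpers mirroring get_list_head_locked()/unmarked_next();
// a sketch only, excluded from the build with #if 0):
#if 0
#include <atomic>

struct Node;
Node* lock_list_head(std::atomic<Node*>& head);  // assumed: spins, locks head
Node* unmarked_next(Node* n);                    // assumed: next sans lock bit
void  unlock(Node* n);                           // assumed: clears lock bit

Node* take_from_start(std::atomic<Node*>& head) {
  Node* take = lock_list_head(head);
  if (take == nullptr) {
    return nullptr;                  // list is empty
  }
  head.store(unmarked_next(take));   // publish the new, unlocked head
  unlock(take);                      // 'take' keeps its stale next pointer
  return take;
}
#endif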
// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    if (AsyncDeflateIdleMonitors) {
      // An async deflation can race us before we manage to make the
      // ObjectMonitor busy by setting the owner below. If we detect
      // that race we just bail out to the slow-path here.
      if (m->object() == NULL) {
        return false;
      }
    } else {
      assert(m->object() == obj, "invariant");
    }
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization.
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint
  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy.
  // enter() returns false if we have lost the race to async deflation
  // and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(THREAD)) {
      return;
    }
  }
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, object, inflate_cause_vm_internal);
  monitor->exit(true, THREAD);
}
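// The stack-locking fast path in enter()/exit() above is a displaced-header
// protocol: save the neutral header into the on-stack BasicLock, CAS the
// mark to point at that lock record, and on exit CAS the saved header back.
// A toy model of just that protocol (ignores biasing, recursion and
// inflation races; hypothetical types, not the real markWord API; kept out
// of the build with #if 0):
#if 0
#include <atomic>
#include <cstdint>

struct ToyObject    { std::atomic<std::uintptr_t> mark; };
struct ToyBasicLock { std::uintptr_t displaced; };

// Outermost lock: save the (assumed neutral) header, then swing the mark
// to point at the on-stack lock record.
bool toy_stack_lock(ToyObject* o, ToyBasicLock* l) {
  std::uintptr_t neutral = o->mark.load();
  l->displaced = neutral;
  return o->mark.compare_exchange_strong(
      neutral, reinterpret_cast<std::uintptr_t>(l));
}

// Outermost unlock: swing the mark back to the saved header.
bool toy_stack_unlock(ToyObject* o, ToyBasicLock* l) {
  std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(l);
  return o->mark.compare_exchange_strong(expected, l->displaced);
}
#endif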
// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = monitor->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // An async deflation can race after the inflate() call and before
  // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
  // enter() returns false if we have lost the race to async deflation
  // and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
    if (monitor->reenter(recursions, THREAD)) {
      return;
    }
  }
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_jni_enter);
    if (monitor->enter(THREAD)) {
      break;
    }
  }
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
  monitor->wait(millis, false, THREAD);
}
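// The wait()/notify()/notifyall() entry points below implement, for Java
// objects, the same protocol that std::condition_variable users follow:
// wait must hold the lock, may wake spuriously, and is therefore re-checked
// in a loop on the Java side. For comparison, the standard C++ equivalent
// (illustrative only, kept out of the build with #if 0):
#if 0
#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool ready = false;

void waiter() {
  std::unique_lock<std::mutex> lock(m);
  while (!ready) {      // guard against spurious wakeups, as Java code must
    cv.wait(lock);      // atomically releases m, reacquires it on wakeup
  }
}

void notifier() {
  { std::lock_guard<std::mutex> lock(m); ready = true; }
  cv.notify_one();      // analogous to Object.notify()
}
#endif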
void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock.
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy.
        // A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflation lock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: naked_yield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();     // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}
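// The default branch above is Marsaglia's xorshift128 generator with the
// four words of state held per thread. A self-contained version of the
// same recurrence, using the classic seeds from Marsaglia's paper
// (illustrative only, excluded from the build with #if 0):
#if 0
#include <cstdio>

static unsigned x = 123456789, y = 362436069, z = 521288629, w = 88675123;

static unsigned xorshift128() {
  unsigned t = x ^ (x << 11);
  x = y; y = z; z = w;
  return w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
}

int main() {
  for (int i = 0; i < 4; i++) {
    std::printf("%u\n", xorshift128());  // a well-distributed pseudo-random stream
  }
  return 0;
}
#endif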
intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);

    // object should remain ineligible for biased locking
    assert(!mark.has_bias_pattern(), "invariant");

    if (mark.is_neutral()) {            // if this is a normal header
      hash = mark.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
                                        // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {               // if the hash was installed, return it
        return hash;
      }
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else if (mark.has_monitor()) {
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        // It has a hash.

        // Separate load of dmw/header above from the loads in
        // is_being_async_deflated().
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
          // A non-multiple copy atomic (nMCA) machine needs a bigger
          // hammer to separate the load above and the loads below.
          OrderAccess::fence();
        } else {
          OrderAccess::loadload();
        }
        if (monitor->is_being_async_deflated()) {
          // But we can't safely use the hash if we detect that async
          // deflation has occurred. So we attempt to restore the
          // header/dmw to the object's header so that we only retry
          // once if the deflater thread happens to be slow.
          monitor->install_displaced_markword_in_object(obj);
          continue;
        }
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    } else if (self->is_lock_owned((address)mark.locker())) {
      // This is a stack lock owned by the calling thread so fetch the
      // displaced markWord from the BasicLock on the stack.
      temp = mark.displaced_mark_helper();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // WARNING:
      // The displaced header in the BasicLock on a thread's stack
      // is strictly immutable. It CANNOT be changed in ANY cases.
      // So we have to inflate the stack lock into an ObjectMonitor
      // even if the current thread owns the lock. The BasicLock on
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.

    // An async deflation can race after the inflate() call and before we
    // can update the ObjectMonitor's header with the hash value below.
    monitor = inflate(self, obj, inflate_cause_hash_code);
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                    // if it does not have a hash
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated()) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}
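// The cmpxchg dance in FastHashCode() (read header, merge hash, CAS, fall
// back to the winner's value on failure) is a generic "install-once" idiom.
// Stripped of markWord details it looks like the sketch below (hypothetical
// bitfield layout, standard C++ atomics, and the assumption that only the
// hash bits can change concurrently; not compiled):
#if 0
#include <atomic>
#include <cstdint>

// Install 'wanted' into the hash bits of 'header' exactly once; whichever
// thread wins the race, every caller returns the same winning hash.
std::uint32_t install_hash_once(std::atomic<std::uint64_t>& header,
                                std::uint32_t wanted) {
  const std::uint64_t kHashMask = 0xFFFFFFFFull << 8;  // assumed layout
  std::uint64_t old_word = header.load();
  std::uint64_t new_word = (old_word & ~kHashMask) |
                           ((std::uint64_t)wanted << 8);
  if (header.compare_exchange_strong(old_word, new_word)) {
    return wanted;                                     // we installed our hash
  }
  // CAS failure rewrites old_word with the current value: the racer's hash.
  return (std::uint32_t)((old_word & kHashMask) >> 8);
}
#endif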
// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    return thread->is_lock_owned((address)mark.locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint and AsyncDeflateIdleMonitors is false.
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    void* owner = monitor->owner();
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}
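// current_thread_holds_lock(), query_lock_ownership() and get_lock_owner()
// all branch on the same three stable mark-word states. The shared shape,
// as a compact sketch (hypothetical enum, not part of this file; not
// compiled):
#if 0
enum class MarkState { Neutral, StackLocked, Inflated };

// After read_stable_mark() the mark is in exactly one of three states:
//   Neutral     -- unlocked, header stored in place
//   StackLocked -- mark points at a BasicLock on the owner's stack
//   Inflated    -- mark is a tagged pointer to an ObjectMonitor
MarkState classify(markWord mark) {
  if (mark.has_locker())  return MarkState::StackLocked;
  if (mark.has_monitor()) return MarkState::Inflated;
  return MarkState::Neutral;
}
#endif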
// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      if (mid->object() != NULL) {
        // Only process with closure if the object is set.

        // monitors_iterate() is only called at a safepoint or when the
        // target thread is suspended or when the target thread is
        // operating on itself. The current closures in use today are
        // only interested in an owned ObjectMonitor and ownership
        // cannot be dropped under the calling contexts so the
        // ObjectMonitor cannot be async deflated.
        closure->do_monitor(mid);
      }
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
}

static bool monitors_used_above_threshold() {
  int population = Atomic::load(&om_list_globals._population);
  if (population == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = population - Atomic::load(&om_list_globals._free_count) -
                        Atomic::load(&om_list_globals._wait_count);
    int monitor_usage = (monitors_used * 100LL) / population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    return true;
  }
  return false;
}

bool ObjectSynchronizer::is_safepoint_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    if (monitors_used_above_threshold()) {
      // Too many monitors in use.
      return true;
    }
    return false;
  }
  return false;
}

bool ObjectSynchronizer::request_deflate_idle_monitors() {
  bool is_JavaThread = Thread::current()->is_Java_thread();
  bool ret_code = false;

  if (AsyncDeflateIdleMonitors) {
    jlong last_time = last_async_deflation_time_ns();
    set_is_async_deflation_requested(true);
    {
      MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
      ml.notify_all();
    }
    const int N_CHECKS = 5;
    for (int i = 0; i < N_CHECKS; i++) {  // sleep for at most 5 seconds
      if (last_async_deflation_time_ns() > last_time) {
        log_info(monitorinflation)("Async Deflation happened after %d check(s).", i);
        ret_code = true;
        break;
      }
      if (is_JavaThread) {
        // JavaThread has to honor the blocking protocol.
        ThreadBlockInVM tbivm(JavaThread::current());
        os::naked_short_sleep(999);  // sleep for almost 1 second
      } else {
        os::naked_short_sleep(999);  // sleep for almost 1 second
      }
    }
    if (ret_code == false) {
      log_info(monitorinflation)("Async Deflation DID NOT happen after %d checks.", N_CHECKS);
    }
  } else if (!Thread::current()->is_VM_thread()) {
    // The VMThread only calls this at shutdown time before the final
    // safepoint so it should not need to force this safepoint.
    VM_ForceSafepoint force_safepoint_op;
    VMThread::execute(&force_safepoint_op);
    ret_code = true;
  }

  return ret_code;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - last_async_deflation_time_ns()) / (NANOUNITS / MILLIUNITS);
}
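// monitors_used_above_threshold() widens to 100LL before dividing so that
// 'monitors_used * 100' cannot overflow a 32-bit int for large monitor
// populations. The arithmetic in isolation (self-contained sketch, not
// compiled):
#if 0
// With tens of millions of monitors, used * 100 would overflow a 32-bit
// int; the 100LL multiply performs the scaling in 64 bits instead.
bool used_above_threshold(int population, int free_count, int wait_count,
                          int threshold_percent) {
  if (population == 0) return false;
  int used = population - free_count - wait_count;
  int usage_percent = (int)((used * 100LL) / population);
  return usage_percent > threshold_percent;
}
#endif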
void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to lock ObjectMonitors for the list traversal.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from om_list_globals._free_list or a per-thread
// free list and associates them with objects. Deflation -- which occurs at
// STW-time or asynchronously -- disassociates idle monitors from objects.
// Such scavenged monitors are returned to the om_list_globals._free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the om_list_globals._free_list
// --   unassigned and on a per-thread free list
// --   assigned to an object. The object is inflated and the mark refers
//      to the ObjectMonitor.

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  NoSafepointVerifier nsv;

  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the
    // thread attempt to instantiate new monitors. Thread-local free lists
    // improve allocation latency, as well as reducing coherency traffic
    // on the shared global list.
    m = take_from_start_of_om_free_list(self);
    if (m != NULL) {
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      prepend_to_om_in_use_list(self, m);
      return m;
    }

    // 2: try to allocate from the global om_list_globals._free_list
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (Atomic::load(&om_list_globals._free_list) != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_global_free_list();
        if (take == NULL) {
          break;  // No more are available.
1452 }
1453 guarantee(take->object() == NULL, "invariant");
1454 if (AsyncDeflateIdleMonitors) {
1455 // We allowed 3 field values to linger during async deflation.
1456 // Clear or restore them as appropriate.
1457 take->set_header(markWord::zero());
1458 // DEFLATER_MARKER is the only non-NULL value we should see here.
1459 take->try_set_owner_from(DEFLATER_MARKER, NULL);
1460 if (take->contentions() < 0) {
1461 // Add back max_jint to restore the contentions field to its
1462 // proper value.
1463 take->add_to_contentions(max_jint);
1464
1465 #ifdef ASSERT
1466 jint l_contentions = take->contentions();
1467 #endif
1468 assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
1469 l_contentions, take->contentions());
1470 }
1471 }
1472 take->Recycle();
1473 // Since we're taking from the global free-list, take must be Free.
1474 // om_release() also sets the allocation state to Free because it
1475 // is called from other code paths.
1476 assert(take->is_free(), "invariant");
1477 om_release(self, take, false);
1478 }
1479 self->om_free_provision += 1 + (self->om_free_provision / 2);
1480 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1481 continue;
1482 }
1483
1484 // 3: allocate a block of new ObjectMonitors
1485 // Both the local and global free lists are empty -- resort to malloc().
1486 // In the current implementation ObjectMonitors are TSM - immortal.
1487 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1488 // each ObjectMonitor to start at the beginning of a cache line,
1489 // so we use align_up().
1490 // A better solution would be to use C++ placement-new.
1491 // BEWARE: As it stands currently, we don't run the ctors!
1492 assert(_BLOCKSIZE > 1, "invariant");
1493 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1494 PaddedObjectMonitor* temp;
1495 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1496 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1497 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1498 (void)memset((void *) temp, 0, neededsize);
1499
1500 // Format the block.
1501 // Initialize the linked list; each monitor points to its successor,
1502 // forming the singly linked free list. The very first monitor
1503 // will point to the next block, which forms the block list.
1504 // The trick of using the 1st element in the block as g_block_list
1505 // linkage should be reconsidered. A better implementation would
1506 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1507
1508 for (int i = 1; i < _BLOCKSIZE; i++) {
1509 temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
1510 assert(temp[i].is_free(), "invariant");
1511 }
1512
1513 // Terminate the last monitor as the end of the list.
1514 temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1515
1516 // Element [0] is reserved for global list linkage
1517 temp[0].set_object(CHAINMARKER);
1518
1519 // Consider carving out this thread's current request from the
1520 // block in hand. This avoids some lock traffic and redundant
1521 // list activity.
1522
1523 prepend_block_to_lists(temp);
1524 }
1525 }
1526
1527 // Place "m" on the caller's private per-thread om_free_list.
1528 // In practice there's no need to clamp or limit the number of
1529 // monitors on a thread's om_free_list as the only non-allocation time
1530 // we'll call om_release() is to return a monitor to the free list after
1531 // a CAS attempt failed.
This doesn't allow unbounded #s of monitors to 1532 // accumulate on a thread's free list. 1533 // 1534 // Key constraint: all ObjectMonitors on a thread's free list and the global 1535 // free list must have their object field set to null. This prevents the 1536 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT() 1537 // -- from reclaiming them while we are trying to release them. 1538 1539 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m, 1540 bool from_per_thread_alloc) { 1541 guarantee(m->header().value() == 0, "invariant"); 1542 guarantee(m->object() == NULL, "invariant"); 1543 NoSafepointVerifier nsv; 1544 1545 if ((m->is_busy() | m->_recursions) != 0) { 1546 stringStream ss; 1547 fatal("freeing in-use monitor: %s, recursions=" INTX_FORMAT, 1548 m->is_busy_to_string(&ss), m->_recursions); 1549 } 1550 m->set_allocation_state(ObjectMonitor::Free); 1551 // _next_om is used for both per-thread in-use and free lists so 1552 // we have to remove 'm' from the in-use list first (as needed). 1553 if (from_per_thread_alloc) { 1554 // Need to remove 'm' from om_in_use_list. 1555 ObjectMonitor* mid = NULL; 1556 ObjectMonitor* next = NULL; 1557 1558 // This list walk can race with another list walker or with async 1559 // deflation so we have to worry about an ObjectMonitor being 1560 // removed from this list while we are walking it. 1561 1562 // Lock the list head to avoid racing with another list walker 1563 // or with async deflation. 1564 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) { 1565 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self)); 1566 } 1567 next = unmarked_next(mid); 1568 if (m == mid) { 1569 // First special case: 1570 // 'm' matches mid, is the list head and is locked. Switch the list 1571 // head to next which unlocks the list head, but leaves the extracted 1572 // mid locked: 1573 Atomic::store(&self->om_in_use_list, next); 1574 } else if (m == next) { 1575 // Second special case: 1576 // 'm' matches next after the list head and we already have the list 1577 // head locked so set mid to what we are extracting: 1578 mid = next; 1579 // Lock mid to prevent races with a list walker or an async 1580 // deflater thread that's ahead of us. The locked list head 1581 // prevents races from behind us. 1582 om_lock(mid); 1583 // Update next to what follows mid (if anything): 1584 next = unmarked_next(mid); 1585 // Switch next after the list head to new next which unlocks the 1586 // list head, but leaves the extracted mid locked: 1587 self->om_in_use_list->set_next_om(next); 1588 } else { 1589 // We have to search the list to find 'm'. 1590 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT 1591 " is too short.", p2i(self), p2i(self->om_in_use_list)); 1592 // Our starting anchor is next after the list head which is the 1593 // last ObjectMonitor we checked: 1594 ObjectMonitor* anchor = next; 1595 // Lock anchor to prevent races with a list walker or an async 1596 // deflater thread that's ahead of us. The locked list head 1597 // prevents races from behind us. 1598 om_lock(anchor); 1599 om_unlock(mid); // Unlock the list head now that anchor is locked. 1600 while ((mid = unmarked_next(anchor)) != NULL) { 1601 if (m == mid) { 1602 // We found 'm' on the per-thread in-use list so extract it. 
1603 // Update next to what follows mid (if anything):
1604 next = unmarked_next(mid);
1605 // Switch next after the anchor to new next which unlocks the
1606 // anchor, but leaves the extracted mid locked:
1607 anchor->set_next_om(next);
1608 break;
1609 } else {
1610 // Lock the next anchor to prevent races with a list walker
1611 // or an async deflater thread that's ahead of us. The locked
1612 // current anchor prevents races from behind us.
1613 om_lock(mid);
1614 // Unlock current anchor now that next anchor is locked:
1615 om_unlock(anchor);
1616 anchor = mid; // Advance to new anchor and try again.
1617 }
1618 }
1619 }
1620
1621 if (mid == NULL) {
1622 // Reached end of the list and didn't find 'm' so:
1623 fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1624 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1625 }
1626
1627 // At this point mid is disconnected from the in-use list so
1628 // its lock no longer has any effects on the in-use list.
1629 Atomic::dec(&self->om_in_use_count);
1630 // Unlock mid, but leave the next value for any lagging list
1631 // walkers. It will get cleaned up when mid is prepended to
1632 // the thread's free list:
1633 om_unlock(mid);
1634 }
1635
1636 prepend_to_om_free_list(self, m);
1637 guarantee(m->is_free(), "invariant");
1638 }
1639
1640 // Return ObjectMonitors on a moribund thread's free and in-use
1641 // lists to the appropriate global lists. The ObjectMonitors on the
1642 // per-thread in-use list may still be in use by other threads.
1643 //
1644 // We currently call om_flush() from Threads::remove() before the
1645 // thread has been excised from the thread list and is no longer a
1646 // mutator. This means that om_flush() cannot run concurrently with
1647 // a safepoint and interleave with deflate_idle_monitors(). In
1648 // particular, this ensures that the thread's in-use monitors are
1649 // scanned by a GC safepoint, either via Thread::oops_do() (before
1650 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1651 // om_flush() is called).
1652 //
1653 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
1654 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
1655 // run at the same time as om_flush() so we have to follow a careful
1656 // protocol to prevent list corruption.
1657
1658 void ObjectSynchronizer::om_flush(Thread* self) {
1659 // Process the per-thread in-use list first to be consistent.
1660 int in_use_count = 0;
1661 ObjectMonitor* in_use_list = NULL;
1662 ObjectMonitor* in_use_tail = NULL;
1663 NoSafepointVerifier nsv;
1664
1665 // This function can race with a list walker or with an async
1666 // deflater thread so we lock the list head to prevent confusion.
1667 // An async deflater thread checks to see if the target thread
1668 // is exiting, but if it has made it past that check before we
1669 // started exiting, then it is racing to get to the in-use list.
1670 if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1671 // At this point, we have locked the in-use list head so a racing
1672 // thread cannot come in after us. However, a racing thread could
1673 // be ahead of us; we'll detect that and delay to let it finish.
1674 //
1675 // The thread is going away; however, the ObjectMonitors on the
1676 // om_in_use_list may still be in-use by other threads. Link
1677 // them to in_use_tail, which will be linked into the global
1678 // in-use list (om_list_globals._in_use_list) below.
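// In outline, that hand-off to the global list is just a singly
// linked list splice. A condensed sketch of what
// prepend_list_to_global_in_use_list() does (the real code also
// spin-locks the list head and updates the count atomically):
//
//   in_use_tail->set_next_om(om_list_globals._in_use_list); // chain tail -> old head
//   om_list_globals._in_use_list = in_use_list;             // publish detached chain
//   om_list_globals._in_use_count += in_use_count;          // account for the move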
1679 // 1680 // Account for the in-use list head before the loop since it is 1681 // already locked (by this thread): 1682 in_use_tail = in_use_list; 1683 in_use_count++; 1684 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) { 1685 if (is_locked(cur_om)) { 1686 // cur_om is locked so there must be a racing walker or async 1687 // deflater thread ahead of us so we'll give it a chance to finish. 1688 while (is_locked(cur_om)) { 1689 os::naked_short_sleep(1); 1690 } 1691 // Refetch the possibly changed next field and try again. 1692 cur_om = unmarked_next(in_use_tail); 1693 continue; 1694 } 1695 if (cur_om->object() == NULL) { 1696 // cur_om was deflated and the object ref was cleared while it 1697 // was locked. We happened to see it just after it was unlocked 1698 // (and added to the free list). Refetch the possibly changed 1699 // next field and try again. 1700 cur_om = unmarked_next(in_use_tail); 1701 continue; 1702 } 1703 in_use_tail = cur_om; 1704 in_use_count++; 1705 cur_om = unmarked_next(cur_om); 1706 } 1707 guarantee(in_use_tail != NULL, "invariant"); 1708 int l_om_in_use_count = Atomic::load(&self->om_in_use_count); 1709 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: " 1710 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count); 1711 Atomic::store(&self->om_in_use_count, 0); 1712 // Clear the in-use list head (which also unlocks it): 1713 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL); 1714 om_unlock(in_use_list); 1715 } 1716 1717 int free_count = 0; 1718 ObjectMonitor* free_list = NULL; 1719 ObjectMonitor* free_tail = NULL; 1720 // This function can race with a list walker thread so we lock the 1721 // list head to prevent confusion. 1722 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) { 1723 // At this point, we have locked the free list head so a racing 1724 // thread cannot come in after us. However, a racing thread could 1725 // be ahead of us; we'll detect that and delay to let it finish. 1726 // 1727 // The thread is going away. Set 'free_tail' to the last per-thread free 1728 // monitor which will be linked to om_list_globals._free_list below. 1729 // 1730 // Account for the free list head before the loop since it is 1731 // already locked (by this thread): 1732 free_tail = free_list; 1733 free_count++; 1734 for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) { 1735 if (is_locked(s)) { 1736 // s is locked so there must be a racing walker thread ahead 1737 // of us so we'll give it a chance to finish. 
1738 while (is_locked(s)) { 1739 os::naked_short_sleep(1); 1740 } 1741 } 1742 free_tail = s; 1743 free_count++; 1744 guarantee(s->object() == NULL, "invariant"); 1745 if (s->is_busy()) { 1746 stringStream ss; 1747 fatal("must be !is_busy: %s", s->is_busy_to_string(&ss)); 1748 } 1749 } 1750 guarantee(free_tail != NULL, "invariant"); 1751 int l_om_free_count = Atomic::load(&self->om_free_count); 1752 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: " 1753 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count); 1754 Atomic::store(&self->om_free_count, 0); 1755 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL); 1756 om_unlock(free_list); 1757 } 1758 1759 if (free_tail != NULL) { 1760 prepend_list_to_global_free_list(free_list, free_tail, free_count); 1761 } 1762 1763 if (in_use_tail != NULL) { 1764 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count); 1765 } 1766 1767 LogStreamHandle(Debug, monitorinflation) lsh_debug; 1768 LogStreamHandle(Info, monitorinflation) lsh_info; 1769 LogStream* ls = NULL; 1770 if (log_is_enabled(Debug, monitorinflation)) { 1771 ls = &lsh_debug; 1772 } else if ((free_count != 0 || in_use_count != 0) && 1773 log_is_enabled(Info, monitorinflation)) { 1774 ls = &lsh_info; 1775 } 1776 if (ls != NULL) { 1777 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d" 1778 ", in_use_count=%d" ", om_free_provision=%d", 1779 p2i(self), free_count, in_use_count, self->om_free_provision); 1780 } 1781 } 1782 1783 static void post_monitor_inflate_event(EventJavaMonitorInflate* event, 1784 const oop obj, 1785 ObjectSynchronizer::InflateCause cause) { 1786 assert(event != NULL, "invariant"); 1787 assert(event->should_commit(), "invariant"); 1788 event->set_monitorClass(obj->klass()); 1789 event->set_address((uintptr_t)(void*)obj); 1790 event->set_cause((u1)cause); 1791 event->commit(); 1792 } 1793 1794 // Fast path code shared by multiple functions 1795 void ObjectSynchronizer::inflate_helper(oop obj) { 1796 markWord mark = obj->mark(); 1797 if (mark.has_monitor()) { 1798 ObjectMonitor* monitor = mark.monitor(); 1799 assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor=" INTPTR_FORMAT " is invalid", p2i(monitor)); 1800 markWord dmw = monitor->header(); 1801 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value()); 1802 return; 1803 } 1804 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal); 1805 } 1806 1807 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object, 1808 const InflateCause cause) { 1809 // Inflate mutates the heap ... 1810 // Relaxing assertion for bug 6320749. 1811 assert(Universe::verify_in_progress() || 1812 !SafepointSynchronize::is_at_safepoint(), "invariant"); 1813 1814 EventJavaMonitorInflate event; 1815 1816 for (;;) { 1817 const markWord mark = object->mark(); 1818 assert(!mark.has_bias_pattern(), "invariant"); 1819 1820 // The mark can be in one of the following states: 1821 // * Inflated - just return 1822 // * Stack-locked - coerce it to inflated 1823 // * INFLATING - busy wait for conversion to complete 1824 // * Neutral - aggressively inflate the object. 1825 // * BIASED - Illegal. 
We should never see this
1826
1827 // CASE: inflated
1828 if (mark.has_monitor()) {
1829 ObjectMonitor* inf = mark.monitor();
1830 markWord dmw = inf->header();
1831 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1832 assert(AsyncDeflateIdleMonitors || inf->object() == object, "invariant");
1833 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1834 return inf;
1835 }
1836
1837 // CASE: inflation in progress - inflating over a stack-lock.
1838 // Some other thread is converting from stack-locked to inflated.
1839 // Only that thread can complete inflation -- other threads must wait.
1840 // The INFLATING value is transient.
1841 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1842 // We could always eliminate polling by parking the thread on some auxiliary list.
1843 if (mark == markWord::INFLATING()) {
1844 read_stable_mark(object);
1845 continue;
1846 }
1847
1848 // CASE: stack-locked
1849 // Could be stack-locked either by this thread or by some other thread.
1850 //
1851 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1852 // to install INFLATING into the mark word. We originally installed INFLATING,
1853 // allocated the objectmonitor, and then finally STed the address of the
1854 // objectmonitor into the mark. This was correct, but artificially lengthened
1855 // the interval in which INFLATING appeared in the mark, thus increasing
1856 // the odds of inflation contention.
1857 //
1858 // We now use per-thread private objectmonitor free lists.
1859 // These lists are reprovisioned from the global free list outside the
1860 // critical INFLATING...ST interval. A thread can transfer
1861 // multiple objectmonitors en masse from the global free list to its local free list.
1862 // This reduces coherency traffic and lock contention on the global free list.
1863 // Using such local free lists, it doesn't matter if the om_alloc() call appears
1864 // before or after the CAS(INFLATING) operation.
1865 // See the comments in om_alloc().
1866
1867 LogStreamHandle(Trace, monitorinflation) lsh;
1868
1869 if (mark.has_locker()) {
1870 ObjectMonitor* m = om_alloc(self);
1871 // Optimistically prepare the objectmonitor - anticipate successful CAS
1872 // We do this before the CAS in order to minimize the length of time
1873 // in which INFLATING appears in the mark.
1874 m->Recycle();
1875 m->_Responsible = NULL;
1876 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1877
1878 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1879 if (cmp != mark) {
1880 // om_release() will reset the allocation state from New to Free.
1881 om_release(self, m, true);
1882 continue; // Interference -- just retry
1883 }
1884
1885 // We've successfully installed INFLATING (0) into the mark-word.
1886 // This is the only case where 0 will appear in a mark-word.
1887 // Only the singular thread that successfully swings the mark-word
1888 // to 0 can perform (or more precisely, complete) inflation.
1889 //
1890 // Why do we CAS a 0 into the mark-word instead of just CASing the
1891 // mark-word from the stack-locked value directly to the new inflated state?
1892 // Consider what happens when a thread unlocks a stack-locked object.
1893 // It attempts to use CAS to swing the displaced header value from the
1894 // on-stack BasicLock back into the object header.
Recall also that the
1895 // header value (hash code, etc) can reside in (a) the object header, or
1896 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1897 // header in an ObjectMonitor. The inflate() routine must copy the header
1898 // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1899 // the while preserving the hashCode stability invariants. If the owner
1900 // decides to release the lock while the value is 0, the unlock will fail
1901 // and control will eventually pass from slow_exit() to inflate. The owner
1902 // will then spin, waiting for the 0 value to disappear. Put another way,
1903 // the 0 causes the owner to stall if the owner happens to try to
1904 // drop the lock (restoring the header from the BasicLock to the object)
1905 // while inflation is in-progress. This protocol avoids races that might
1906 // otherwise permit hashCode values to change or "flicker" for an object.
1907 // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
1908 // 0 serves as a "BUSY" inflate-in-progress indicator.
1909
1910
1911 // Fetch the displaced mark from the owner's stack.
1912 // The owner can't die or unwind past the lock while our INFLATING
1913 // object is in the mark. Furthermore the owner can't complete
1914 // an unlock on the object, either.
1915 markWord dmw = mark.displaced_mark_helper();
1916 // Catch if the object's header is not neutral (not locked and
1917 // not marked is what we care about here).
1918 ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1919
1920 // Setup monitor fields to proper values -- prepare the monitor
1921 m->set_header(dmw);
1922
1923 // Optimization: if the mark.locker stack address is associated
1924 // with this thread we could simply set m->_owner = self.
1925 // Note that a thread can inflate an object
1926 // that it has stack-locked -- as might happen in wait() -- directly
1927 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1928 if (AsyncDeflateIdleMonitors) {
1929 m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
1930 } else {
1931 m->set_owner_from(NULL, mark.locker());
1932 }
1933 m->set_object(object);
1934 // TODO-FIXME: assert BasicLock->dhw != 0.
1935
1936 // Must preserve store ordering. The monitor state must
1937 // be stable at the time of publishing the monitor address.
1938 guarantee(object->mark() == markWord::INFLATING(), "invariant");
1939 object->release_set_mark(markWord::encode(m));
1940
1941 // Once ObjectMonitor is configured and the object is associated
1942 // with the ObjectMonitor, it is safe to allow async deflation:
1943 assert(m->is_new(), "freshly allocated monitor must be new");
1944 m->set_allocation_state(ObjectMonitor::Old);
1945
1946 // Hopefully the performance counters are allocated on distinct cache lines
1947 // to avoid false sharing on MP systems ...
1948 OM_PERFDATA_OP(Inflations, inc());
1949 if (log_is_enabled(Trace, monitorinflation)) {
1950 ResourceMark rm(self);
1951 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1952 INTPTR_FORMAT ", type='%s'", p2i(object),
1953 object->mark().value(), object->klass()->external_name());
1954 }
1955 if (event.should_commit()) {
1956 post_monitor_inflate_event(&event, object, cause);
1957 }
1958 return m;
1959 }
1960
1961 // CASE: neutral
1962 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1963 // If we know we're inflating for entry it's better to inflate by swinging a 1964 // pre-locked ObjectMonitor pointer into the object header. A successful 1965 // CAS inflates the object *and* confers ownership to the inflating thread. 1966 // In the current implementation we use a 2-step mechanism where we CAS() 1967 // to inflate and then CAS() again to try to swing _owner from NULL to self. 1968 // An inflateTry() method that we could call from enter() would be useful. 1969 1970 // Catch if the object's header is not neutral (not locked and 1971 // not marked is what we care about here). 1972 ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); 1973 ObjectMonitor* m = om_alloc(self); 1974 // prepare m for installation - set monitor to initial state 1975 m->Recycle(); 1976 m->set_header(mark); 1977 if (AsyncDeflateIdleMonitors) { 1978 // DEFLATER_MARKER is the only non-NULL value we should see here. 1979 m->try_set_owner_from(DEFLATER_MARKER, NULL); 1980 } 1981 m->set_object(object); 1982 m->_Responsible = NULL; 1983 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class 1984 1985 if (object->cas_set_mark(markWord::encode(m), mark) != mark) { 1986 m->set_header(markWord::zero()); 1987 m->set_object(NULL); 1988 m->Recycle(); 1989 // om_release() will reset the allocation state from New to Free. 1990 om_release(self, m, true); 1991 m = NULL; 1992 continue; 1993 // interference - the markword changed - just retry. 1994 // The state-transitions are one-way, so there's no chance of 1995 // live-lock -- "Inflated" is an absorbing state. 1996 } 1997 1998 // Once the ObjectMonitor is configured and object is associated 1999 // with the ObjectMonitor, it is safe to allow async deflation: 2000 assert(m->is_new(), "freshly allocated monitor must be new"); 2001 m->set_allocation_state(ObjectMonitor::Old); 2002 2003 // Hopefully the performance counters are allocated on distinct 2004 // cache lines to avoid false sharing on MP systems ... 2005 OM_PERFDATA_OP(Inflations, inc()); 2006 if (log_is_enabled(Trace, monitorinflation)) { 2007 ResourceMark rm(self); 2008 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark=" 2009 INTPTR_FORMAT ", type='%s'", p2i(object), 2010 object->mark().value(), object->klass()->external_name()); 2011 } 2012 if (event.should_commit()) { 2013 post_monitor_inflate_event(&event, object, cause); 2014 } 2015 return m; 2016 } 2017 } 2018 2019 2020 // We maintain a list of in-use monitors for each thread. 2021 // 2022 // For safepoint based deflation: 2023 // deflate_thread_local_monitors() scans a single thread's in-use list, while 2024 // deflate_idle_monitors() scans only a global list of in-use monitors which 2025 // is populated only as a thread dies (see om_flush()). 2026 // 2027 // These operations are called at all safepoints, immediately after mutators 2028 // are stopped, but before any objects have moved. Collectively they traverse 2029 // the population of in-use monitors, deflating where possible. The scavenged 2030 // monitors are returned to the global monitor free list. 2031 // 2032 // Beware that we scavenge at *every* stop-the-world point. Having a large 2033 // number of monitors in-use could negatively impact performance. We also want 2034 // to minimize the total # of monitors in circulation, as they incur a small 2035 // footprint penalty. 2036 // 2037 // Perversely, the heap size -- and thus the STW safepoint rate -- 2038 // typically drives the scavenge rate. 
Large heaps can mean infrequent GC, 2039 // which in turn can mean large(r) numbers of ObjectMonitors in circulation. 2040 // This is an unfortunate aspect of this design. 2041 // 2042 // For async deflation: 2043 // If a special deflation request is made, then the safepoint based 2044 // deflation mechanism is used. Otherwise, an async deflation request 2045 // is registered with the ServiceThread and it is notified. 2046 2047 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) { 2048 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2049 2050 // The per-thread in-use lists are handled in 2051 // ParallelSPCleanupThreadClosure::do_thread(). 2052 2053 if (!AsyncDeflateIdleMonitors) { 2054 // Use the older mechanism for the global in-use list. 2055 ObjectSynchronizer::deflate_idle_monitors(counters); 2056 return; 2057 } 2058 2059 log_debug(monitorinflation)("requesting async deflation of idle monitors."); 2060 // Request deflation of idle monitors by the ServiceThread: 2061 set_is_async_deflation_requested(true); 2062 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); 2063 ml.notify_all(); 2064 2065 if (log_is_enabled(Debug, monitorinflation)) { 2066 // exit_globals()'s call to audit_and_print_stats() is done 2067 // at the Info level and not at a safepoint. 2068 // For safepoint based deflation, audit_and_print_stats() is called 2069 // in ObjectSynchronizer::finish_deflate_idle_monitors() at the 2070 // Debug level at a safepoint. 2071 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); 2072 } 2073 } 2074 2075 // Deflate a single monitor if not in-use 2076 // Return true if deflated, false if in-use 2077 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, 2078 ObjectMonitor** free_head_p, 2079 ObjectMonitor** free_tail_p) { 2080 bool deflated; 2081 // Normal case ... The monitor is associated with obj. 2082 const markWord mark = obj->mark(); 2083 guarantee(mark == markWord::encode(mid), "should match: mark=" 2084 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(), 2085 markWord::encode(mid).value()); 2086 // Make sure that mark.monitor() and markWord::encode() agree: 2087 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT 2088 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid)); 2089 const markWord dmw = mid->header(); 2090 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); 2091 2092 if (mid->is_busy()) { 2093 // Easy checks are first - the ObjectMonitor is busy so no deflation. 2094 deflated = false; 2095 } else { 2096 // Deflate the monitor if it is no longer being used 2097 // It's idle - scavenge and return to the global free list 2098 // plain old deflation ... 2099 if (log_is_enabled(Trace, monitorinflation)) { 2100 ResourceMark rm; 2101 log_trace(monitorinflation)("deflate_monitor: " 2102 "object=" INTPTR_FORMAT ", mark=" 2103 INTPTR_FORMAT ", type='%s'", p2i(obj), 2104 mark.value(), obj->klass()->external_name()); 2105 } 2106 2107 // Restore the header back to obj 2108 obj->release_set_mark(dmw); 2109 if (AsyncDeflateIdleMonitors) { 2110 // clear() expects the owner field to be NULL. 2111 // DEFLATER_MARKER is the only non-NULL value we should see here. 
2112 mid->try_set_owner_from(DEFLATER_MARKER, NULL); 2113 } 2114 mid->clear(); 2115 2116 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT, 2117 p2i(mid->object())); 2118 assert(mid->is_free(), "invariant"); 2119 2120 // Move the deflated ObjectMonitor to the working free list 2121 // defined by free_head_p and free_tail_p. 2122 if (*free_head_p == NULL) *free_head_p = mid; 2123 if (*free_tail_p != NULL) { 2124 // We append to the list so the caller can use mid->_next_om 2125 // to fix the linkages in its context. 2126 ObjectMonitor* prevtail = *free_tail_p; 2127 // Should have been cleaned up by the caller: 2128 // Note: Should not have to lock prevtail here since we're at a 2129 // safepoint and ObjectMonitors on the local free list should 2130 // not be accessed in parallel. 2131 #ifdef ASSERT 2132 ObjectMonitor* l_next_om = prevtail->next_om(); 2133 #endif 2134 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2135 prevtail->set_next_om(mid); 2136 } 2137 *free_tail_p = mid; 2138 // At this point, mid->_next_om still refers to its current 2139 // value and another ObjectMonitor's _next_om field still 2140 // refers to this ObjectMonitor. Those linkages have to be 2141 // cleaned up by the caller who has the complete context. 2142 deflated = true; 2143 } 2144 return deflated; 2145 } 2146 2147 // Deflate the specified ObjectMonitor if not in-use using a JavaThread. 2148 // Returns true if it was deflated and false otherwise. 2149 // 2150 // The async deflation protocol sets owner to DEFLATER_MARKER and 2151 // makes contentions negative as signals to contending threads that 2152 // an async deflation is in progress. There are a number of checks 2153 // as part of the protocol to make sure that the calling thread has 2154 // not lost the race to a contending thread. 2155 // 2156 // The ObjectMonitor has been successfully async deflated when: 2157 // (contentions < 0) 2158 // Contending threads that see that condition know to retry their operation. 2159 // 2160 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid, 2161 ObjectMonitor** free_head_p, 2162 ObjectMonitor** free_tail_p) { 2163 assert(AsyncDeflateIdleMonitors, "sanity check"); 2164 assert(Thread::current()->is_Java_thread(), "precondition"); 2165 // A newly allocated ObjectMonitor should not be seen here so we 2166 // avoid an endless inflate/deflate cycle. 2167 assert(mid->is_old(), "must be old: allocation_state=%d", 2168 (int) mid->allocation_state()); 2169 2170 if (mid->is_busy()) { 2171 // Easy checks are first - the ObjectMonitor is busy so no deflation. 2172 return false; 2173 } 2174 2175 // Set a NULL owner to DEFLATER_MARKER to force any contending thread 2176 // through the slow path. This is just the first part of the async 2177 // deflation dance. 2178 if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) != NULL) { 2179 // The owner field is no longer NULL so we lost the race since the 2180 // ObjectMonitor is now busy. 2181 return false; 2182 } 2183 2184 if (mid->contentions() > 0 || mid->_waiters != 0) { 2185 // Another thread has raced to enter the ObjectMonitor after 2186 // mid->is_busy() above or has already entered and waited on 2187 // it which makes it busy so no deflation. Restore owner to 2188 // NULL if it is still DEFLATER_MARKER. 2189 if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) { 2190 // Deferred decrement for the JT EnterI() that cancelled the async deflation. 
2191 mid->add_to_contentions(-1); 2192 } 2193 return false; 2194 } 2195 2196 // Make a zero contentions field negative to force any contending threads 2197 // to retry. This is the second part of the async deflation dance. 2198 if (Atomic::cmpxchg(&mid->_contentions, (jint)0, -max_jint) != 0) { 2199 // Contentions was no longer 0 so we lost the race since the 2200 // ObjectMonitor is now busy. Restore owner to NULL if it is 2201 // still DEFLATER_MARKER: 2202 if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) { 2203 // Deferred decrement for the JT EnterI() that cancelled the async deflation. 2204 mid->add_to_contentions(-1); 2205 } 2206 return false; 2207 } 2208 2209 // Sanity checks for the races: 2210 guarantee(mid->owner_is_DEFLATER_MARKER(), "must be deflater marker"); 2211 guarantee(mid->contentions() < 0, "must be negative: contentions=%d", 2212 mid->contentions()); 2213 guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters); 2214 guarantee(mid->_cxq == NULL, "must be no contending threads: cxq=" 2215 INTPTR_FORMAT, p2i(mid->_cxq)); 2216 guarantee(mid->_EntryList == NULL, 2217 "must be no entering threads: EntryList=" INTPTR_FORMAT, 2218 p2i(mid->_EntryList)); 2219 2220 const oop obj = (oop) mid->object(); 2221 if (log_is_enabled(Trace, monitorinflation)) { 2222 ResourceMark rm; 2223 log_trace(monitorinflation)("deflate_monitor_using_JT: " 2224 "object=" INTPTR_FORMAT ", mark=" 2225 INTPTR_FORMAT ", type='%s'", 2226 p2i(obj), obj->mark().value(), 2227 obj->klass()->external_name()); 2228 } 2229 2230 // Install the old mark word if nobody else has already done it. 2231 mid->install_displaced_markword_in_object(obj); 2232 mid->clear_common(); 2233 2234 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT, 2235 p2i(mid->object())); 2236 assert(mid->is_free(), "must be free: allocation_state=%d", 2237 (int)mid->allocation_state()); 2238 2239 // Move the deflated ObjectMonitor to the working free list 2240 // defined by free_head_p and free_tail_p. 2241 if (*free_head_p == NULL) { 2242 // First one on the list. 2243 *free_head_p = mid; 2244 } 2245 if (*free_tail_p != NULL) { 2246 // We append to the list so the caller can use mid->_next_om 2247 // to fix the linkages in its context. 2248 ObjectMonitor* prevtail = *free_tail_p; 2249 // prevtail should have been cleaned up by the caller: 2250 #ifdef ASSERT 2251 ObjectMonitor* l_next_om = unmarked_next(prevtail); 2252 #endif 2253 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2254 om_lock(prevtail); 2255 prevtail->set_next_om(mid); // prevtail now points to mid (and is unlocked) 2256 } 2257 *free_tail_p = mid; 2258 2259 // At this point, mid->_next_om still refers to its current 2260 // value and another ObjectMonitor's _next_om field still 2261 // refers to this ObjectMonitor. Those linkages have to be 2262 // cleaned up by the caller who has the complete context. 2263 2264 // We leave owner == DEFLATER_MARKER and contentions < 0 2265 // to force any racing threads to retry. 2266 return true; // Success, ObjectMonitor has been deflated. 2267 } 2268 2269 // Walk a given monitor list, and deflate idle monitors. 2270 // The given list could be a per-thread list or a global list. 
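// The unlink step in the walk below is the classic singly linked list
// extraction; a minimal generic sketch (hypothetical Node type and
// is_idle() predicate, not the exact code -- the real loop also
// maintains the in-use counter and NULL-terminates each extracted node):
//
//   Node* prev = NULL;
//   Node* next = NULL;
//   for (Node* cur = head; cur != NULL; cur = next) {
//     next = cur->next;
//     if (is_idle(cur)) {
//       if (prev == NULL) head = next;  // extracting the list head
//       else prev->next = next;         // bypass cur in the chain
//     } else {
//       prev = cur;                     // keep cur; it becomes the new anchor
//     }
//   }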
2271 //
2272 // In the case of parallel processing of thread local monitor lists,
2273 // work is done by Threads::parallel_threads_do() which ensures that
2274 // each Java thread is processed by exactly one worker thread, and
2275 // thus avoids conflicts that would arise if worker threads were to
2276 // process the same monitor lists concurrently.
2277 //
2278 // See also ParallelSPCleanupTask and
2279 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2280 // Threads::parallel_java_threads_do() in thread.cpp.
2281 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
2282 int* count_p,
2283 ObjectMonitor** free_head_p,
2284 ObjectMonitor** free_tail_p) {
2285 ObjectMonitor* cur_mid_in_use = NULL;
2286 ObjectMonitor* mid = NULL;
2287 ObjectMonitor* next = NULL;
2288 int deflated_count = 0;
2289
2290 // This list walk executes at a safepoint and does not race with any
2291 // other list walkers.
2292
2293 for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
2294 next = unmarked_next(mid);
2295 oop obj = (oop) mid->object();
2296 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2297 // Deflation succeeded and already updated free_head_p and
2298 // free_tail_p as needed. Finish the move to the local free list
2299 // by unlinking mid from the global or per-thread in-use list.
2300 if (cur_mid_in_use == NULL) {
2301 // mid is the list head so switch the list head to next:
2302 Atomic::store(list_p, next);
2303 } else {
2304 // Switch cur_mid_in_use's next field to next:
2305 cur_mid_in_use->set_next_om(next);
2306 }
2307 // At this point mid is disconnected from the in-use list.
2308 deflated_count++;
2309 Atomic::dec(count_p);
2310 // mid is current tail in the free_head_p list so NULL terminate it:
2311 mid->set_next_om(NULL);
2312 } else {
2313 cur_mid_in_use = mid;
2314 }
2315 }
2316 return deflated_count;
2317 }
2318
2319 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2320 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2321 // list could be a per-thread in-use list or the global in-use list.
2322 // If a safepoint has started, then we save state via saved_mid_in_use_p
2323 // and return to the caller to honor the safepoint.
2324 //
2325 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2326 int* count_p,
2327 ObjectMonitor** free_head_p,
2328 ObjectMonitor** free_tail_p,
2329 ObjectMonitor** saved_mid_in_use_p) {
2330 assert(AsyncDeflateIdleMonitors, "sanity check");
2331 JavaThread* self = JavaThread::current();
2332
2333 ObjectMonitor* cur_mid_in_use = NULL;
2334 ObjectMonitor* mid = NULL;
2335 ObjectMonitor* next = NULL;
2336 ObjectMonitor* next_next = NULL;
2337 int deflated_count = 0;
2338 NoSafepointVerifier nsv;
2339
2340 // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
2341 // protocol because om_release() can do list deletions in parallel;
2342 // this also prevents races with a list walker thread. We also
2343 // lock-next-next-as-we-go to prevent an om_flush() that is behind
2344 // this thread from passing us.
2345 if (*saved_mid_in_use_p == NULL) {
2346 // No saved state so start at the beginning.
2347 // Lock the list head so we can possibly deflate it:
2348 if ((mid = get_list_head_locked(list_p)) == NULL) {
2349 return 0; // The list is empty so nothing to deflate.
2350 }
2351 next = unmarked_next(mid);
2352 } else {
2353 // We're restarting after a safepoint so restore the necessary state
2354 // before we resume.
2355 cur_mid_in_use = *saved_mid_in_use_p;
2356 // Lock cur_mid_in_use so we can possibly update its
2357 // next field to extract a deflated ObjectMonitor.
2358 om_lock(cur_mid_in_use);
2359 mid = unmarked_next(cur_mid_in_use);
2360 if (mid == NULL) {
2361 om_unlock(cur_mid_in_use);
2362 *saved_mid_in_use_p = NULL;
2363 return 0; // The remainder is empty so nothing more to deflate.
2364 }
2365 // Lock mid so we can possibly deflate it:
2366 om_lock(mid);
2367 next = unmarked_next(mid);
2368 }
2369
2370 while (true) {
2371 // The current mid is locked at this point. If we have a
2372 // cur_mid_in_use, then it is also locked at this point.
2373
2374 if (next != NULL) {
2375 // We lock next so that an om_flush() thread that is behind us
2376 // cannot pass us when we unlock the current mid.
2377 om_lock(next);
2378 next_next = unmarked_next(next);
2379 }
2380
2381 // Only try to deflate if there is an associated Java object and if
2382 // mid is old (is not newly allocated and is not newly freed).
2383 if (mid->object() != NULL && mid->is_old() &&
2384 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2385 // Deflation succeeded and already updated free_head_p and
2386 // free_tail_p as needed. Finish the move to the local free list
2387 // by unlinking mid from the global or per-thread in-use list.
2388 if (cur_mid_in_use == NULL) {
2389 // mid is the list head and it is locked. Switch the list head
2390 // to next which is also locked (if not NULL) and also leave
2391 // mid locked:
2392 Atomic::store(list_p, next);
2393 } else {
2394 ObjectMonitor* locked_next = mark_om_ptr(next);
2395 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2396 // next field to locked_next and also leave mid locked:
2397 cur_mid_in_use->set_next_om(locked_next);
2398 }
2399 // At this point mid is disconnected from the in-use list so
2400 // its lock no longer has any effects on the in-use list.
2401 deflated_count++;
2402 Atomic::dec(count_p);
2403 // mid is current tail in the free_head_p list so NULL terminate it
2404 // (which also unlocks it):
2405 mid->set_next_om(NULL);
2406
2407 // All the list management is done so move on to the next one:
2408 mid = next; // mid keeps non-NULL next's locked state
2409 next = next_next;
2410 } else {
2411 // mid is considered in-use if it does not have an associated
2412 // Java object or mid is not old or deflation did not succeed.
2413 // A mid->is_new() node can be seen here when it is freshly
2414 // returned by om_alloc() (and skips the deflation code path).
2415 // A mid->is_old() node can be seen here when deflation failed.
2416 // A mid->is_free() node can be seen here when a fresh node from
2417 // om_alloc() is released by om_release() due to losing the race
2418 // in inflate().
2419
2420 // All the list management is done so move on to the next one:
2421 if (cur_mid_in_use != NULL) {
2422 om_unlock(cur_mid_in_use);
2423 }
2424 // The next cur_mid_in_use keeps mid's lock state so
2425 // that it is stable for a possible next field change. It
2426 // cannot be modified by om_release() while it is locked.
2427 cur_mid_in_use = mid; 2428 mid = next; // mid keeps non-NULL next's locked state 2429 next = next_next; 2430 2431 if (SafepointMechanism::should_block(self) && 2432 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) { 2433 // If a safepoint has started and cur_mid_in_use is not the list 2434 // head and is old, then it is safe to use as saved state. Return 2435 // to the caller before blocking. 2436 *saved_mid_in_use_p = cur_mid_in_use; 2437 om_unlock(cur_mid_in_use); 2438 if (mid != NULL) { 2439 om_unlock(mid); 2440 } 2441 return deflated_count; 2442 } 2443 } 2444 if (mid == NULL) { 2445 if (cur_mid_in_use != NULL) { 2446 om_unlock(cur_mid_in_use); 2447 } 2448 break; // Reached end of the list so nothing more to deflate. 2449 } 2450 2451 // The current mid's next field is locked at this point. If we have 2452 // a cur_mid_in_use, then it is also locked at this point. 2453 } 2454 // We finished the list without a safepoint starting so there's 2455 // no need to save state. 2456 *saved_mid_in_use_p = NULL; 2457 return deflated_count; 2458 } 2459 2460 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { 2461 counters->n_in_use = 0; // currently associated with objects 2462 counters->n_in_circulation = 0; // extant 2463 counters->n_scavenged = 0; // reclaimed (global and per-thread) 2464 counters->per_thread_scavenged = 0; // per-thread scavenge total 2465 counters->per_thread_times = 0.0; // per-thread scavenge times 2466 } 2467 2468 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { 2469 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2470 2471 if (AsyncDeflateIdleMonitors) { 2472 // Nothing to do when global idle ObjectMonitors are deflated using 2473 // a JavaThread. 2474 return; 2475 } 2476 2477 bool deflated = false; 2478 2479 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors 2480 ObjectMonitor* free_tail_p = NULL; 2481 elapsedTimer timer; 2482 2483 if (log_is_enabled(Info, monitorinflation)) { 2484 timer.start(); 2485 } 2486 2487 // Note: the thread-local monitors lists get deflated in 2488 // a separate pass. See deflate_thread_local_monitors(). 2489 2490 // For moribund threads, scan om_list_globals._in_use_list 2491 int deflated_count = 0; 2492 if (Atomic::load(&om_list_globals._in_use_list) != NULL) { 2493 // Update n_in_circulation before om_list_globals._in_use_count is 2494 // updated by deflation. 2495 Atomic::add(&counters->n_in_circulation, 2496 Atomic::load(&om_list_globals._in_use_count)); 2497 2498 deflated_count = deflate_monitor_list(&om_list_globals._in_use_list, 2499 &om_list_globals._in_use_count, 2500 &free_head_p, &free_tail_p); 2501 Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count)); 2502 } 2503 2504 if (free_head_p != NULL) { 2505 // Move the deflated ObjectMonitors back to the global free list. 
2506 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); 2507 #ifdef ASSERT 2508 ObjectMonitor* l_next_om = free_tail_p->next_om(); 2509 #endif 2510 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2511 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); 2512 Atomic::add(&counters->n_scavenged, deflated_count); 2513 } 2514 timer.stop(); 2515 2516 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2517 LogStreamHandle(Info, monitorinflation) lsh_info; 2518 LogStream* ls = NULL; 2519 if (log_is_enabled(Debug, monitorinflation)) { 2520 ls = &lsh_debug; 2521 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2522 ls = &lsh_info; 2523 } 2524 if (ls != NULL) { 2525 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 2526 } 2527 } 2528 2529 class HandshakeForDeflation : public HandshakeClosure { 2530 public: 2531 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {} 2532 2533 void do_thread(Thread* thread) { 2534 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread=" 2535 INTPTR_FORMAT, p2i(thread)); 2536 } 2537 }; 2538 2539 void ObjectSynchronizer::deflate_idle_monitors_using_JT() { 2540 assert(AsyncDeflateIdleMonitors, "sanity check"); 2541 2542 // Deflate any global idle monitors. 2543 deflate_global_idle_monitors_using_JT(); 2544 2545 int count = 0; 2546 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 2547 if (Atomic::load(&jt->om_in_use_count) > 0 && !jt->is_exiting()) { 2548 // This JavaThread is using ObjectMonitors so deflate any that 2549 // are idle unless this JavaThread is exiting; do not race with 2550 // ObjectSynchronizer::om_flush(). 2551 deflate_per_thread_idle_monitors_using_JT(jt); 2552 count++; 2553 } 2554 } 2555 if (count > 0) { 2556 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count); 2557 } 2558 2559 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, " 2560 "global_free_count=%d, global_wait_count=%d", 2561 Atomic::load(&om_list_globals._population), 2562 Atomic::load(&om_list_globals._in_use_count), 2563 Atomic::load(&om_list_globals._free_count), 2564 Atomic::load(&om_list_globals._wait_count)); 2565 2566 // The ServiceThread's async deflation request has been processed. 2567 _last_async_deflation_time_ns = os::javaTimeNanos(); 2568 set_is_async_deflation_requested(false); 2569 2570 if (Atomic::load(&om_list_globals._wait_count) > 0) { 2571 // There are deflated ObjectMonitors waiting for a handshake 2572 // (or a safepoint) for safety. 2573 2574 ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list); 2575 ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL"); 2576 int count = Atomic::load(&om_list_globals._wait_count); 2577 Atomic::store(&om_list_globals._wait_count, 0); 2578 Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL); 2579 2580 // Find the tail for prepend_list_to_common(). No need to mark 2581 // ObjectMonitors for this list walk since only the deflater 2582 // thread manages the wait list. 
2583 int l_count = 0; 2584 ObjectMonitor* tail = NULL; 2585 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) { 2586 tail = n; 2587 l_count++; 2588 } 2589 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count); 2590 2591 // Will execute a safepoint if !ThreadLocalHandshakes: 2592 HandshakeForDeflation hfd_hc; 2593 Handshake::execute(&hfd_hc); 2594 2595 prepend_list_to_common(list, tail, count, &om_list_globals._free_list, 2596 &om_list_globals._free_count); 2597 2598 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count); 2599 } 2600 } 2601 2602 // Deflate global idle ObjectMonitors using a JavaThread. 2603 // 2604 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() { 2605 assert(AsyncDeflateIdleMonitors, "sanity check"); 2606 assert(Thread::current()->is_Java_thread(), "precondition"); 2607 JavaThread* self = JavaThread::current(); 2608 2609 deflate_common_idle_monitors_using_JT(true /* is_global */, self); 2610 } 2611 2612 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread. 2613 // 2614 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) { 2615 assert(AsyncDeflateIdleMonitors, "sanity check"); 2616 assert(Thread::current()->is_Java_thread(), "precondition"); 2617 2618 deflate_common_idle_monitors_using_JT(false /* !is_global */, target); 2619 } 2620 2621 // Deflate global or per-thread idle ObjectMonitors using a JavaThread. 2622 // 2623 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) { 2624 JavaThread* self = JavaThread::current(); 2625 2626 int deflated_count = 0; 2627 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors 2628 ObjectMonitor* free_tail_p = NULL; 2629 ObjectMonitor* saved_mid_in_use_p = NULL; 2630 elapsedTimer timer; 2631 2632 if (log_is_enabled(Info, monitorinflation)) { 2633 timer.start(); 2634 } 2635 2636 if (is_global) { 2637 OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count))); 2638 } else { 2639 OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count))); 2640 } 2641 2642 do { 2643 int local_deflated_count; 2644 if (is_global) { 2645 local_deflated_count = 2646 deflate_monitor_list_using_JT(&om_list_globals._in_use_list, 2647 &om_list_globals._in_use_count, 2648 &free_head_p, &free_tail_p, 2649 &saved_mid_in_use_p); 2650 } else { 2651 local_deflated_count = 2652 deflate_monitor_list_using_JT(&target->om_in_use_list, 2653 &target->om_in_use_count, &free_head_p, 2654 &free_tail_p, &saved_mid_in_use_p); 2655 } 2656 deflated_count += local_deflated_count; 2657 2658 if (free_head_p != NULL) { 2659 // Move the deflated ObjectMonitors to the global free list. 2660 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count); 2661 // Note: The target thread can be doing an om_alloc() that 2662 // is trying to prepend an ObjectMonitor on its in-use list 2663 // at the same time that we have deflated the current in-use 2664 // list head and put it on the local free list. prepend_to_common() 2665 // will detect the race and retry which avoids list corruption, 2666 // but the next field in free_tail_p can flicker to marked 2667 // and then unmarked while prepend_to_common() is sorting it 2668 // all out. 
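// Why the deflated ObjectMonitors go to the global wait list below
// rather than straight to the free list: a racing thread may still be
// examining a just-deflated ObjectMonitor, so it must not be handed
// out again by om_alloc() until every JavaThread has passed a
// handshake (or a safepoint). A condensed sketch of the sequence, as
// arranged in deflate_idle_monitors_using_JT() above ('n' stands for
// the count of moved monitors):
//
//   prepend_list_to_global_wait_list(free_head_p, free_tail_p, n);
//   // ... later, back in deflate_idle_monitors_using_JT():
//   HandshakeForDeflation hfd_hc;
//   Handshake::execute(&hfd_hc);          // every thread crosses a barrier
//   prepend_list_to_common(list, tail, n, &om_list_globals._free_list,
//                          &om_list_globals._free_count);  // only now reusable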
2669 #ifdef ASSERT 2670 ObjectMonitor* l_next_om = unmarked_next(free_tail_p); 2671 #endif 2672 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2673 2674 prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count); 2675 2676 OM_PERFDATA_OP(Deflations, inc(local_deflated_count)); 2677 } 2678 2679 if (saved_mid_in_use_p != NULL) { 2680 // deflate_monitor_list_using_JT() detected a safepoint starting. 2681 timer.stop(); 2682 { 2683 if (is_global) { 2684 log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint."); 2685 } else { 2686 log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target)); 2687 } 2688 assert(SafepointMechanism::should_block(self), "sanity check"); 2689 ThreadBlockInVM blocker(self); 2690 } 2691 // Prepare for another loop after the safepoint. 2692 free_head_p = NULL; 2693 free_tail_p = NULL; 2694 if (log_is_enabled(Info, monitorinflation)) { 2695 timer.start(); 2696 } 2697 } 2698 } while (saved_mid_in_use_p != NULL); 2699 timer.stop(); 2700 2701 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2702 LogStreamHandle(Info, monitorinflation) lsh_info; 2703 LogStream* ls = NULL; 2704 if (log_is_enabled(Debug, monitorinflation)) { 2705 ls = &lsh_debug; 2706 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2707 ls = &lsh_info; 2708 } 2709 if (ls != NULL) { 2710 if (is_global) { 2711 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 2712 } else { 2713 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count); 2714 } 2715 } 2716 } 2717 2718 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { 2719 // Report the cumulative time for deflating each thread's idle 2720 // monitors. Note: if the work is split among more than one 2721 // worker thread, then the reported time will likely be more 2722 // than a beginning to end measurement of the phase. 2723 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged); 2724 2725 if (AsyncDeflateIdleMonitors) { 2726 // Nothing to do when idle ObjectMonitors are deflated using 2727 // a JavaThread. 2728 return; 2729 } 2730 2731 if (log_is_enabled(Debug, monitorinflation)) { 2732 // exit_globals()'s call to audit_and_print_stats() is done 2733 // at the Info level and not at a safepoint. 2734 // For async deflation, audit_and_print_stats() is called in 2735 // ObjectSynchronizer::do_safepoint_work() at the Debug level 2736 // at a safepoint. 
2737 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); 2738 } else if (log_is_enabled(Info, monitorinflation)) { 2739 log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, " 2740 "global_free_count=%d, global_wait_count=%d", 2741 Atomic::load(&om_list_globals._population), 2742 Atomic::load(&om_list_globals._in_use_count), 2743 Atomic::load(&om_list_globals._free_count), 2744 Atomic::load(&om_list_globals._wait_count)); 2745 } 2746 2747 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged)); 2748 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation)); 2749 2750 GVars.stw_random = os::random(); 2751 GVars.stw_cycle++; 2752 } 2753 2754 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { 2755 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2756 2757 if (AsyncDeflateIdleMonitors) { 2758 // Nothing to do when per-thread idle ObjectMonitors are deflated 2759 // using a JavaThread. 2760 return; 2761 } 2762 2763 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors 2764 ObjectMonitor* free_tail_p = NULL; 2765 elapsedTimer timer; 2766 2767 if (log_is_enabled(Info, safepoint, cleanup) || 2768 log_is_enabled(Info, monitorinflation)) { 2769 timer.start(); 2770 } 2771 2772 // Update n_in_circulation before om_in_use_count is updated by deflation. 2773 Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count)); 2774 2775 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p); 2776 Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count)); 2777 2778 if (free_head_p != NULL) { 2779 // Move the deflated ObjectMonitors back to the global free list. 2780 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); 2781 #ifdef ASSERT 2782 ObjectMonitor* l_next_om = free_tail_p->next_om(); 2783 #endif 2784 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2785 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); 2786 Atomic::add(&counters->n_scavenged, deflated_count); 2787 Atomic::add(&counters->per_thread_scavenged, deflated_count); 2788 } 2789 2790 timer.stop(); 2791 counters->per_thread_times += timer.seconds(); 2792 2793 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2794 LogStreamHandle(Info, monitorinflation) lsh_info; 2795 LogStream* ls = NULL; 2796 if (log_is_enabled(Debug, monitorinflation)) { 2797 ls = &lsh_debug; 2798 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2799 ls = &lsh_info; 2800 } 2801 if (ls != NULL) { 2802 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count); 2803 } 2804 } 2805 2806 // Monitor cleanup on JavaThread::exit 2807 2808 // Iterate through monitor cache and attempt to release thread's monitors 2809 // Gives up on a particular monitor if an exception occurs, but continues 2810 // the overall iteration, swallowing the exception. 2811 class ReleaseJavaMonitorsClosure: public MonitorClosure { 2812 private: 2813 TRAPS; 2814 2815 public: 2816 ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {} 2817 void do_monitor(ObjectMonitor* mid) { 2818 if (mid->owner() == THREAD) { 2819 (void)mid->complete_exit(CHECK); 2820 } 2821 } 2822 }; 2823 2824 // Release all inflated monitors owned by THREAD. Lightweight monitors are 2825 // ignored. 
// This is meant to be called during JNI thread detach, which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_MonitorEnter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   uint64_t tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0,
//             "safepoint must not have run");
// The counter is incremented on each safepoint transition and is odd
// while a safepoint is in progress, so an even and unchanged value
// proves that no safepoint intervened.
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  ObjectSynchronizer::monitors_iterate(&rjmc);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// This function can be called at a safepoint or it can be called when
// we are trying to exit the VM. When we are trying to exit the VM, the
// list walker functions can run in parallel with the other list
// operations so spin-locking is used for safety.
//
// Calls to this function can be added in various places as a debugging
// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
// details logged at the Info level and 'false' for the 'on_exit'
// parameter to have in-use monitor details logged at the Trace level.
// deflate_monitor_list() no longer uses spin-locking so be careful
// when adding audit_and_print_stats() calls at a safepoint.
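//
// Hypothetical call-site sketches (illustration only; these exact call
// sites are not in this file, but the function and parameter are):
//   // From a VM-exit path: in-use monitor details go to the Info level.
//   ObjectSynchronizer::audit_and_print_stats(true /* on_exit */);
//   // From a safepoint: in-use monitor details go to the Trace level.
//   ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);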
//
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check om_list_globals._population:
  if (Atomic::load(&om_list_globals._population) == chk_om_population) {
    ls->print_cr("global_population=%d equals chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // log_monitor_list_counts() to return a value that doesn't match
    // om_list_globals._population. So far, a higher value has been
    // seen in testing, so something is being double-counted by
    // log_monitor_list_counts().
    ls->print_cr("WARNING: global_population=%d is not equal to "
                 "chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  }

  // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check om_list_globals._free_list and om_list_globals._free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  // Check om_list_globals._wait_list and om_list_globals._wait_count:
  chk_global_wait_list_and_count(ls, &error_cnt);

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting, this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
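// A free (or global wait) list entry is expected to be completely idle:
// not busy, with a NULL _object field and a NULL _header field. The
// _header check is relaxed for free global monitors when
// AsyncDeflateIdleMonitors is enabled (see the code below).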
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    } else if (!AsyncDeflateIdleMonitors) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Lock the next ObjectMonitor for traversal and unlock the current
// ObjectMonitor. Returns the next ObjectMonitor if there is one.
// Otherwise returns NULL (after unlocking the current ObjectMonitor).
// This function is used by the various list walker functions to
// safely walk a list without allowing an ObjectMonitor to be moved
// to another list in the middle of a walk.
static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
  assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
  ObjectMonitor* next = unmarked_next(cur);
  if (next == NULL) {  // Reached the end of the list.
    om_unlock(cur);
    return NULL;
  }
  om_lock(next);   // Lock next before unlocking current to keep the
  om_unlock(cur);  // traversal from being by-passed by another thread.
  return next;
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._free_list)) != NULL) {
    // Marked the global free list head so process the list.
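    // The walk below relies on the hand-over-hand locking provided by
    // lock_next_for_traversal() so an entry cannot be moved to another
    // list while it is being counted.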
    while (true) {
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  if (l_free_count == chk_om_free_count) {
    out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
                  l_free_count, chk_om_free_count);
  } else {
    // With fine-grained locks on om_list_globals._free_list, it
    // is possible for an ObjectMonitor to be prepended to
    // om_list_globals._free_list after we started calculating
    // chk_om_free_count, so om_list_globals._free_count may not
    // match anymore.
    out->print_cr("WARNING: global_free_count=%d is not equal to "
                  "chk_om_free_count=%d", l_free_count, chk_om_free_count);
  }
}

// Check the global wait list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_wait_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._wait_list)) != NULL) {
    // Marked the global wait list head so process the list.
    while (true) {
      // Rules for om_list_globals._wait_list are the same as for
      // om_list_globals._free_list:
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_wait_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  if (Atomic::load(&om_list_globals._wait_count) == chk_om_wait_count) {
    out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
  } else {
    out->print_cr("ERROR: global_wait_count=%d is not equal to "
                  "chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
    // Marked the global in-use list head so process the list.
    while (true) {
      chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  if (l_in_use_count == chk_om_in_use_count) {
    out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // an exiting JavaThread to put its in-use ObjectMonitors on the
    // global in-use list after chk_om_in_use_count is calculated above.
    out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  }
}

// Check an in-use monitor entry; log any errors.
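// An in-use entry is expected to be fully wired up: a non-NULL _header,
// a non-NULL _object, and an object header (mark word) that both encodes
// a monitor and points back at this very ObjectMonitor.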
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
    // The remaining checks dereference _object so we cannot go any
    // further with this entry.
    return;
  }
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
    // mark.monitor() below is only well defined when the mark word
    // actually encodes a monitor so we cannot go any further with this
    // entry.
    return;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
    // Marked the per-thread free list head so process the list.
    while (true) {
      chk_free_entry(jt, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_free_count = Atomic::load(&jt->om_free_count);
  if (l_om_free_count == chk_om_free_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
                  "chk_om_free_count=%d", p2i(jt), l_om_free_count, chk_om_free_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
                  "equal to chk_om_free_count=%d", p2i(jt), l_om_free_count,
                  chk_om_free_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
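// Note: unlike the global in-use and free count checks above, a count
// mismatch on a per-thread list is reported as an error rather than a
// warning, presumably because a per-thread list is only ever modified
// by its owning JavaThread (or at a safepoint).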
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
    // Marked the per-thread in-use list head so process the list.
    while (true) {
      chk_in_use_entry(jt, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
  if (l_om_in_use_count == chk_om_in_use_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                  "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                  "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
  stringStream ss;
  if (Atomic::load(&om_list_globals._in_use_count) > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
      // Marked the global in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(cur),
                   cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
                   p2i(obj), obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %18s %18s",
                "jt", "monitor", "BHL", "object", "object type");
  out->print_cr("================== ================== === ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
      // Marked the per-thread in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
                   " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
                   mark.hash() != 0, cur->owner() != NULL, p2i(obj),
                   obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int pop_count = 0;
  out->print_cr("%18s %10s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Wait", "Total");
  out->print_cr("================== ========== ========== ========== ==========");
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  int l_wait_count = Atomic::load(&om_list_globals._wait_count);
  out->print_cr("%18s %10d %10d %10d %10d", "", l_in_use_count,
                l_free_count, l_wait_count,
                Atomic::load(&om_list_globals._population));
  pop_count += l_in_use_count + l_free_count + l_wait_count;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
    int l_om_free_count = Atomic::load(&jt->om_free_count);
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  l_om_in_use_count, l_om_free_count, jt->om_free_provision);
    pop_count += l_om_in_use_count + l_om_free_count;
  }
  return pop_count;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
  return 0;
}

#endif
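// A hypothetical assertion-style caller of verify_objmon_isinpool()
// (sketch only; real call sites, if any, live elsewhere in the VM):
//   assert(ObjectSynchronizer::verify_objmon_isinpool(mid),
//          "monitor must come from the monitor cache");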