1 /* 2 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/vmSymbols.hpp" 27 #include "logging/log.hpp" 28 #include "logging/logStream.hpp" 29 #include "jfr/jfrEvents.hpp" 30 #include "memory/allocation.inline.hpp" 31 #include "memory/metaspaceShared.hpp" 32 #include "memory/padded.hpp" 33 #include "memory/resourceArea.hpp" 34 #include "memory/universe.hpp" 35 #include "oops/markWord.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "runtime/atomic.hpp" 38 #include "runtime/biasedLocking.hpp" 39 #include "runtime/handles.inline.hpp" 40 #include "runtime/handshake.hpp" 41 #include "runtime/interfaceSupport.inline.hpp" 42 #include "runtime/mutexLocker.hpp" 43 #include "runtime/objectMonitor.hpp" 44 #include "runtime/objectMonitor.inline.hpp" 45 #include "runtime/osThread.hpp" 46 #include "runtime/safepointMechanism.inline.hpp" 47 #include "runtime/safepointVerifiers.hpp" 48 #include "runtime/sharedRuntime.hpp" 49 #include "runtime/stubRoutines.hpp" 50 #include "runtime/synchronizer.hpp" 51 #include "runtime/thread.inline.hpp" 52 #include "runtime/timer.hpp" 53 #include "runtime/vframe.hpp" 54 #include "runtime/vmThread.hpp" 55 #include "utilities/align.hpp" 56 #include "utilities/dtrace.hpp" 57 #include "utilities/events.hpp" 58 #include "utilities/preserveException.hpp" 59 60 // The "core" versions of monitor enter and exit reside in this file. 61 // The interpreter and compilers contain specialized transliterated 62 // variants of the enter-exit fast-path operations. See i486.ad fast_lock(), 63 // for instance. If you make changes here, make sure to modify the 64 // interpreter, and both C1 and C2 fast-path inline locking code emission. 65 // 66 // ----------------------------------------------------------------------------- 67 68 #ifdef DTRACE_ENABLED 69 70 // Only bother with this argument setup if dtrace is available 71 // TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly. 
// Capture the common probe arguments: the target thread's Java tid and the
// object's class name as raw (non-NUL-terminated) UTF-8 bytes + length.
// Expands to local variable declarations, so it must appear first in the
// enclosing probe block.
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

// Fire the HOTSPOT_MONITOR_WAIT probe (guarded by -XX:+DTraceMonitorProbes).
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

// Token-pasting aliases so DTRACE_MONITOR_PROBE(<name>, ...) can select the
// matching HOTSPOT_MONITOR_* probe by name.
#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

// Fire one of the notify/notifyAll/waited probes, selected via ##probe.
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

// No-op stubs when dtrace support is compiled out. NOTE(review): the
// parameter order here differs from the enabled variants above; harmless
// because the arguments are ignored, but worth confirming upstream.
#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741: fire the "waited"
// probe from a separate out-of-line function and return a dummy int.
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

// Spin locks used to throttle contending inflaters; indexed by a hash of the
// object (see the inflation code later in this file).
#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

// Cache-line padded holder for the global ObjectMonitor lists and their
// counters. Each field is padded to OM_CACHE_LINE_SIZE to prevent
// false sharing between the highly contended list heads/counts.
struct ObjectMonitorListGlobals {
  char         _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared list related variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.

  // Global ObjectMonitor free list. Newly allocated and deflated
  // ObjectMonitors are prepended here.
  ObjectMonitor* _free_list;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* _in_use_list;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor wait list. Deflated ObjectMonitors wait on
  // this list until after a handshake or a safepoint for platforms
  // that don't support handshakes. After the handshake or safepoint,
  // the deflated ObjectMonitors are prepended to free_list.
  ObjectMonitor* _wait_list;
  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  int _free_count;    // # on free_list
  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));

  int _in_use_count;  // # on in_use_list
  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));

  int _population;    // # Extant -- in circulation
  DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));

  int _wait_count;    // # on wait_list
  DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
};
static ObjectMonitorListGlobals om_list_globals;

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Spin-lock functions

// ObjectMonitors are not lockable outside of this file. We use spin-locks
// implemented using a bit in the _next_om field instead of the heavier
// weight locking mechanisms for faster list management.

#define OM_LOCK_BIT 0x1

// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
// Test the OM_LOCK_BIT tag in om's _next_om field.
static bool is_locked(ObjectMonitor* om) {
  return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
}

// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
// (Tags the low bit of the pointer value; does not dereference om.)
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}

// Return the unmarked next field in an ObjectMonitor. Note: the next
// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
}

// Try to lock an ObjectMonitor by atomically setting OM_LOCK_BIT in its
// next field. Returns true if locking was successful.
// Otherwise returns false (either already locked, or next changed under us).
static bool try_om_lock(ObjectMonitor* om) {
  // Get current next field without any OM_LOCK_BIT value.
  ObjectMonitor* next = unmarked_next(om);
  if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
    return false;  // Cannot lock the ObjectMonitor.
  }
  return true;
}

// Lock an ObjectMonitor, spinning until the lock is acquired.
static void om_lock(ObjectMonitor* om) {
  while (true) {
    if (try_om_lock(om)) {
      return;
    }
  }
}

// Unlock an ObjectMonitor by clearing OM_LOCK_BIT in its next field.
// Guarantees the monitor was actually locked by the caller.
static void om_unlock(ObjectMonitor* om) {
  ObjectMonitor* next = om->next_om();
  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
  om->set_next_om(next);
}

// Get the list head after locking it. Returns the list head (locked) or
// NULL if the list is empty. Re-checks the head after locking because a
// racing taker/prepender may have swapped it between the load and the lock.
static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
  while (true) {
    ObjectMonitor* mid = Atomic::load(list_p);
    if (mid == NULL) {
      return NULL;  // The list is empty.
    }
    if (try_om_lock(mid)) {
      if (Atomic::load(list_p) != mid) {
        // The list head changed before we could lock it so we have to retry.
        om_unlock(mid);
        continue;
      }
      return mid;
    }
  }
}

#undef OM_LOCK_BIT


// =====================> List Management functions

// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p. Locks 'tail' (and the current head,
// when non-NULL) to coordinate with concurrent list walkers.
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                   int count, ObjectMonitor** list_p,
                                   int* count_p) {
  while (true) {
    ObjectMonitor* cur = Atomic::load(list_p);
    // Prepend list to *list_p.
    if (!try_om_lock(tail)) {
      // Failed to lock tail due to a list walker so try it all again.
      continue;
    }
    tail->set_next_om(cur);  // tail now points to cur (and unlocks tail)
    if (cur == NULL) {
      // No potential race with takers or other prependers since
      // *list_p is empty.
      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
        // Successfully switched *list_p to the list value.
        Atomic::add(count_p, count);
        break;
      }
      // Implied else: try it all again
    } else {
      if (!try_om_lock(cur)) {
        continue;  // failed to lock cur so try it all again
      }
      // We locked cur so try to switch *list_p to the list value.
      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
        // The list head has changed so unlock cur and try again:
        om_unlock(cur);
        continue;
      }
      Atomic::add(count_p, count);
      om_unlock(cur);
      break;
    }
  }
}

// Prepend a newly allocated block of ObjectMonitors to g_block_list and
// om_list_globals._free_list. Also updates om_list_globals._population
// and om_list_globals._free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
  // First we handle g_block_list:
  while (true) {
    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
    // Prepend new_blk to g_block_list. The first ObjectMonitor in
    // a block is reserved for use as linkage to the next block.
    new_blk[0].set_next_om(cur);
    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
      // Successfully switched g_block_list to the new_blk value.
      Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
      break;
    }
    // Implied else: try it all again
  }

  // Second we handle om_list_globals._free_list:
  // monitors [1, _BLOCKSIZE-1] go on the free list; slot 0 is block linkage.
  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
                         &om_list_globals._free_list, &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._free_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._free_count.
static void prepend_list_to_global_free_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
                         &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._wait_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._wait_count.
static void prepend_list_to_global_wait_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
                         &om_list_globals._wait_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._in_use_count.
static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
                                               ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
                         &om_list_globals._in_use_count);
}

// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter. Both m and the current list head are locked
// during the swap to coordinate with list walkers and async deflation.
static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
                              int* count_p) {
  while (true) {
    om_lock(m);  // Lock m so we can safely update its next field.
    ObjectMonitor* cur = NULL;
    // Lock the list head to guard against races with a list walker
    // or async deflater thread (which only races in om_in_use_list):
    if ((cur = get_list_head_locked(list_p)) != NULL) {
      // List head is now locked so we can safely switch it.
      m->set_next_om(cur);  // m now points to cur (and unlocks m)
      Atomic::store(list_p, m);  // Switch list head to unlocked m.
      om_unlock(cur);
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
    m->set_next_om(cur);  // m now points to NULL (and unlocks m)
    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
      // List head is now unlocked m.
      break;
    }
    // Implied else: try it all again
  }
  Atomic::inc(count_p);
}

// Prepend an ObjectMonitor to a per-thread om_free_list.
// Also updates the per-thread om_free_count.
static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
}

// Prepend an ObjectMonitor to a per-thread om_in_use_list.
// Also updates the per-thread om_in_use_count.
static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}

// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter.
// Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
                                                int* count_p) {
  ObjectMonitor* take = NULL;
  // Lock the list head to guard against races with a list walker
  // or async deflater thread (which only races in om_list_globals._free_list):
  if ((take = get_list_head_locked(list_p)) == NULL) {
    return NULL;  // None are available.
  }
  ObjectMonitor* next = unmarked_next(take);
  // Switch locked list head to next (which unlocks the list head, but
  // leaves take locked):
  Atomic::store(list_p, next);
  Atomic::dec(count_p);
  // Unlock take, but leave the next value for any lagging list
  // walkers. It will get cleaned up when take is prepended to
  // the in-use list:
  om_unlock(take);
  return take;
}

// Take an ObjectMonitor from the start of the om_list_globals._free_list.
// Also updates om_list_globals._free_count. Returns NULL if none are
// available.
static ObjectMonitor* take_from_start_of_global_free_list() {
  return take_from_start_of_common(&om_list_globals._free_list,
                                   &om_list_globals._free_count);
}

// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
}


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

// Fast-path notify/notifyAll: returns true if the notification was fully
// handled here (stack-locked by self, or an inflated monitor self owns);
// returns false to fall back to the slow path (biased locking, IMS, etc.).
bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

// Fast-path monitor enter for an already-inflated monitor: handles the
// recursive-enter and uncontended-acquire cases; returns false to defer
// everything else (inflation, bias revocation, blocking) to the slow path.
bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    if (AsyncDeflateIdleMonitors) {
      // An async deflation can race us before we manage to make the
      // ObjectMonitor busy by setting the owner below. If we detect
      // that race we just bail out to the slow-path here.
      if (m->object() == NULL) {
        return false;
      }
    } else {
      assert(m->object() == obj, "invariant");
    }
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint
  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed.
// The implementation is extremely sensitive to race condition. Be careful.

// Slow-path monitor enter: revokes any bias, attempts stack-locking via CAS,
// handles recursive stack-locks, and finally inflates and enters the
// ObjectMonitor (retrying if async deflation wins the race).
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    // Recursive stack-lock: a zero displaced header marks this BasicLock
    // as a recursive enter (see the dhw.value() == 0 case in exit()).
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(THREAD)) {
      return;
    }
  }
}

// Slow-path monitor exit: handles recursive stack-lock exit (zero displaced
// header), plain stack-lock exit via CAS, and otherwise inflates and exits
// the ObjectMonitor.
void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, object, inflate_cause_vm_internal);
  monitor->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
  // Returns the saved recursion count so reenter() can restore it.
  intptr_t ret_code = monitor->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // An async deflation can race after the inflate() call and before
  // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
  // enter() returns false if we have lost the race to async deflation
  // and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
    if (monitor->reenter(recursions, THREAD)) {
      return;
    }
  }
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_jni_enter);
    if (monitor->enter(THREAD)) {
      break;
    }
  }
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

// RAII: releases the lock taken in the constructor (if any).
ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741.  Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

// As wait() but not interruptible and returns nothing to the caller.
void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
  monitor->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    // Stack-locked by this thread: the waitset is necessarily empty,
    // so there is nothing to notify.
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

// Cache-line padded globals for hash code generation; padding prevents
// false sharing between the mostly-read fields and the hot hc_sequence.
struct SharedGlobals {
  char         _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;

// Return the object's mark word once it is stable, i.e. not in the
// transient INFLATING() state; spins/yields while another thread is
// mid-inflation. (Continues beyond this chunk.)
static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;    // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer.
Threads waiting for 900 // inflation to complete would use CAS to push themselves onto a singly linked 901 // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag 902 // and calling park(). When inflation was complete the thread that accomplished inflation 903 // would detach the list and set the markword to inflated with a single CAS and 904 // then for each thread on the list, set the flag and unpark() the thread. 905 // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease 906 // wakes at most one thread whereas we need to wake the entire list. 907 int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1); 908 int YieldThenBlock = 0; 909 assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant"); 910 assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant"); 911 Thread::muxAcquire(gInflationLocks + ix, "gInflationLock"); 912 while (obj->mark() == markWord::INFLATING()) { 913 // Beware: NakedYield() is advisory and has almost no effect on some platforms 914 // so we periodically call self->_ParkEvent->park(1). 915 // We use a mixed spin/yield/block mechanism. 916 if ((YieldThenBlock++) >= 16) { 917 Thread::current()->_ParkEvent->park(1); 918 } else { 919 os::naked_yield(); 920 } 921 } 922 Thread::muxRelease(gInflationLocks + ix); 923 } 924 } else { 925 SpinPause(); // SMP-polite spinning 926 } 927 } 928 } 929 930 // hashCode() generation : 931 // 932 // Possibilities: 933 // * MD5Digest of {obj,stw_random} 934 // * CRC32 of {obj,stw_random} or any linear-feedback shift register function. 935 // * A DES- or AES-style SBox[] mechanism 936 // * One of the Phi-based schemes, such as: 937 // 2654435761 = 2^32 * Phi (golden ratio) 938 // HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ; 939 // * A variation of Marsaglia's shift-xor RNG scheme. 
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular).  This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

// Generate the next identity hash code value for 'obj' on behalf of 'self'.
// The scheme is selected by the 'hashCode' flag (values 0-4 and default).
// The result is masked to fit markWord's hash field and is never 0
// (markWord::no_hash), so a zero hash in a header means "no hash yet".
static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations.  This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    // NOTE(review): non-atomic increment of a shared volatile counter --
    // duplicate hash values are possible under contention; presumably
    // acceptable for this testing-oriented mode -- confirm.
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;  // 0 is reserved to mean "no hash installed"
  assert(value != markWord::no_hash, "invariant");
  return value;
}

// Return the identity hash code for 'obj', computing and installing it if
// necessary. The hash lives either in the object's header (neutral mark),
// in the displaced header of a stack lock, or in the header/dmw field of an
// inflated ObjectMonitor. Retries if it races with async monitor deflation.
intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);

    // object should remain ineligible for biased locking
    assert(!mark.has_bias_pattern(), "invariant");

    if (mark.is_neutral()) {            // if this is a normal header
      hash = mark.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
                                        // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {               // if the hash was installed, return it
        return hash;
      }
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else if (mark.has_monitor()) {
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        // It has a hash.

        // Separate load of dmw/header above from the loads in
        // is_being_async_deflated().
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
          // A non-multiple copy atomic (nMCA) machine needs a bigger
          // hammer to make sure that the load above and the loads
          // below all see non-stale memory values.
          OrderAccess::fence();
        } else {
          OrderAccess::loadload();
        }
        if (monitor->is_being_async_deflated()) {
          // But we can't safely use the hash if we detect that async
          // deflation has occurred. So we attempt to restore the
          // header/dmw to the object's header so that we only retry
          // once if the deflater thread happens to be slow.
          monitor->install_displaced_markword_in_object(obj);
          continue;
        }
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    } else if (self->is_lock_owned((address)mark.locker())) {
      // This is a stack lock owned by the calling thread so fetch the
      // displaced markWord from the BasicLock on the stack.
      temp = mark.displaced_mark_helper();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // WARNING:
      // The displaced header in the BasicLock on a thread's stack
      // is strictly immutable. It CANNOT be changed in ANY cases.
      // So we have to inflate the stack lock into an ObjectMonitor
      // even if the current thread owns the lock. The BasicLock on
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.

    // An async deflation can race after the inflate() call and before we
    // can update the ObjectMonitor's header with the hash value below.
    monitor = inflate(self, obj, inflate_cause_hash_code);
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                    // if it does not have a hash
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated()) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  // Deprecated thin wrapper; see FastHashCode() above.
  return FastHashCode(Thread::current(), obj());
}


// Return true iff 'thread' (which must be the current thread) holds the
// lock on 'h_obj'. Handles all three lock states: stack-locked, inflated,
// and neutral (unlocked).
bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    return thread->is_lock_owned((address)mark.locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}

// Be aware of this method could revoke bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // CASE: stack-locked.  Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint and AsyncDeflateIdleMonitors is false.
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    void* owner = monitor->owner();
    if (owner == NULL) return owner_none;
    // owner may be the owning JavaThread* itself, or (for a monitor that
    // was inflated from a stack lock) an address on the owner's stack.
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
//
// Return the JavaThread that owns the lock on 'h_obj', or NULL if it is
// unlocked (or the owner cannot be resolved via 't_list').
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

// Apply 'closure' to every in-use ObjectMonitor (object field set) in every
// block on g_block_list. Element [0] of each block is the chain header
// (CHAINMARKER) and is skipped.
void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    // i > 0 (not >= 0): index 0 is the block-chain header, not a monitor.
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      if (mid->object() != NULL) {
        // Only process with closure if the object is set.

        // monitors_iterate() is only called at a safepoint or when the
        // target thread is suspended or when the target thread is
        // operating on itself. The current closures in use today are
        // only interested in an owned ObjectMonitor and ownership
        // cannot be dropped under the calling contexts so the
        // ObjectMonitor cannot be async deflated.
        closure->do_monitor(mid);
      }
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
}

// Return true if the percentage of in-use monitors (population minus free
// and wait counts) exceeds MonitorUsedDeflationThreshold. A threshold of 0
// disables this check.
static bool monitors_used_above_threshold() {
  int population = Atomic::load(&om_list_globals._population);
  if (population == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = population - Atomic::load(&om_list_globals._free_count) -
                        Atomic::load(&om_list_globals._wait_count);
    // 100LL widens the multiply to avoid int overflow before the divide.
    int monitor_usage = (monitors_used * 100LL) / population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}

// Decide whether the ServiceThread should run an async deflation cycle.
// NOTE: has a side effect -- updates _last_async_deflation_time_ns when
// the interval-based trigger fires.
bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
  return false;
}

// Decide whether deflation should run at the upcoming safepoint. With
// async deflation enabled, safepoint deflation only happens on a special
// request; otherwise it is driven by the usage threshold.
bool ObjectSynchronizer::is_safepoint_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    if (monitors_used_above_threshold()) {
      // Too many monitors in use.
      return true;
    }
    return false;
  }
  if (is_special_deflation_requested()) {
    // For AsyncDeflateIdleMonitors only do a safepoint deflation
    // if there is a special deflation request.
    return true;
  }
  return false;
}

// Milliseconds elapsed since the last async deflation cycle was triggered.
jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

// Apply 'f' to the object refs of all monitors on the global in-use list.
void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
}

// Apply 'f' to the object refs of all monitors on 'thread's in-use list.
void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

// Walk 'list' via the (unlocked) _next_om links and apply 'f' to each
// monitor's object ref.
void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to lock ObjectMonitors for the list traversal.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from om_list_globals._free_list or a per-thread
// free list and associates them with objects. Deflation -- which occurs at
// STW-time or asynchronously -- disassociates idle monitors from objects.
// Such scavenged monitors are returned to the om_list_globals._free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the om_list_globals._free_list
// --   unassigned and on a per-thread free list
// --   assigned to an object.  The object is inflated and the mark refers
//      to the ObjectMonitor.

// Allocate an ObjectMonitor for 'self'. Allocation proceeds in three tiers:
// (1) the thread's local free list, (2) a bulk refill of the local list
// from the global free list, (3) malloc'ing a fresh block of _BLOCKSIZE
// monitors and prepending it to the global lists, then retrying.
ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  NoSafepointVerifier nsv;

  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the
    // thread attempt to instantiate new monitors. Thread-local free lists
    // improve allocation latency, as well as reducing coherency traffic
    // on the shared global list.
    m = take_from_start_of_om_free_list(self);
    if (m != NULL) {
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      prepend_to_om_in_use_list(self, m);
      return m;
    }

    // 2: try to allocate from the global om_list_globals._free_list
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (Atomic::load(&om_list_globals._free_list) != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_global_free_list();
        if (take == NULL) {
          break;  // No more are available.
        }
        guarantee(take->object() == NULL, "invariant");
        if (AsyncDeflateIdleMonitors) {
          // We allowed 3 field values to linger during async deflation.
          // Clear or restore them as appropriate.
          take->set_header(markWord::zero());
          // DEFLATER_MARKER is the only non-NULL value we should see here.
          take->try_set_owner_from(DEFLATER_MARKER, NULL);
          if (take->contentions() < 0) {
            // Add back max_jint to restore the contentions field to its
            // proper value.
            take->add_to_contentions(max_jint);

#ifdef ASSERT
            jint l_contentions = take->contentions();
#endif
            assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
                   l_contentions, take->contentions());
          }
        }
        take->Recycle();
        // Since we're taking from the global free-list, take must be Free.
        // om_release() also sets the allocation state to Free because it
        // is called from other code paths.
        assert(take->is_free(), "invariant");
        om_release(self, take, false);
      }
      // Grow the per-refill batch size by 1.5x, capped at MAXPRIVATE.
      self->om_free_provision += 1 + (self->om_free_provision / 2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
      continue;
    }

    // 3: allocate a block of new ObjectMonitors
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE], but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
    PaddedObjectMonitor* temp;
    size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
    (void)memset((void *) temp, 0, neededsize);

    // Format the block.
    // initialize the linked list, each monitor points to its next
    // forming the single linked free list, the very first monitor
    // will points to next block, which forms the block list.
    // The trick of using the 1st element in the block as g_block_list
    // linkage should be reconsidered.  A better implementation would
    // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      // The last iteration briefly links to the (legal) one-past-the-end
      // address; it is overwritten with NULL immediately below.
      temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
      assert(temp[i].is_free(), "invariant");
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    prepend_block_to_lists(temp);
  }
}

// Place "m" on the caller's private per-thread om_free_list.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's om_free_list as the only non-allocation time
// we'll call om_release() is to return a monitor to the free list after
// a CAS attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
// -- from reclaiming them while we are trying to release them.
// Return an ObjectMonitor to self's free list. The monitor must already
// be deflated: header cleared, no associated object, not busy and not
// recursively held (guaranteed below). If from_per_thread_alloc is true,
// 'm' is currently linked on self's in-use list and must be unlinked
// from it first, racing against concurrent list walkers and async
// deflater threads via the per-node om_lock()/om_unlock() protocol.
void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                    bool from_per_thread_alloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  NoSafepointVerifier nsv;

  // is_busy() and _recursions must both be zero; OR-ing them lets one
  // branch check both at once.
  if ((m->is_busy() | m->_recursions) != 0) {
    stringStream ss;
    fatal("freeing in-use monitor: %s, recursions=" INTX_FORMAT,
          m->is_busy_to_string(&ss), m->_recursions);
  }
  m->set_allocation_state(ObjectMonitor::Free);
  // _next_om is used for both per-thread in-use and free lists so
  // we have to remove 'm' from the in-use list first (as needed).
  if (from_per_thread_alloc) {
    // Need to remove 'm' from om_in_use_list.
    ObjectMonitor* mid = NULL;
    ObjectMonitor* next = NULL;

    // This list walk can race with another list walker or with async
    // deflation so we have to worry about an ObjectMonitor being
    // removed from this list while we are walking it.

    // Lock the list head to avoid racing with another list walker
    // or with async deflation.
    if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
      fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
    }
    next = unmarked_next(mid);
    if (m == mid) {
      // First special case:
      // 'm' matches mid, is the list head and is locked. Switch the list
      // head to next which unlocks the list head, but leaves the extracted
      // mid locked:
      Atomic::store(&self->om_in_use_list, next);
    } else if (m == next) {
      // Second special case:
      // 'm' matches next after the list head and we already have the list
      // head locked so set mid to what we are extracting:
      mid = next;
      // Lock mid to prevent races with a list walker or an async
      // deflater thread that's ahead of us. The locked list head
      // prevents races from behind us.
      om_lock(mid);
      // Update next to what follows mid (if anything):
      next = unmarked_next(mid);
      // Switch next after the list head to new next which unlocks the
      // list head, but leaves the extracted mid locked:
      self->om_in_use_list->set_next_om(next);
    } else {
      // We have to search the list to find 'm'.
      guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
                " is too short.", p2i(self), p2i(self->om_in_use_list));
      // Our starting anchor is next after the list head which is the
      // last ObjectMonitor we checked:
      ObjectMonitor* anchor = next;
      // Lock anchor to prevent races with a list walker or an async
      // deflater thread that's ahead of us. The locked list head
      // prevents races from behind us.
      om_lock(anchor);
      om_unlock(mid);  // Unlock the list head now that anchor is locked.
      // Hand-over-hand locking: always hold the lock on the node behind
      // the one being examined so concurrent walkers cannot pass us.
      while ((mid = unmarked_next(anchor)) != NULL) {
        if (m == mid) {
          // We found 'm' on the per-thread in-use list so extract it.
          // Update next to what follows mid (if anything):
          next = unmarked_next(mid);
          // Switch next after the anchor to new next which unlocks the
          // anchor, but leaves the extracted mid locked:
          anchor->set_next_om(next);
          break;
        } else {
          // Lock the next anchor to prevent races with a list walker
          // or an async deflater thread that's ahead of us. The locked
          // current anchor prevents races from behind us.
          om_lock(mid);
          // Unlock current anchor now that next anchor is locked:
          om_unlock(anchor);
          anchor = mid;  // Advance to new anchor and try again.
        }
      }
    }

    if (mid == NULL) {
      // Reached end of the list and didn't find 'm' so:
      // NOTE(review): this message is missing a space before "on"
      // ("m=0x...on om_in_use_list="); message text left unchanged here.
      fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT "on om_in_use_list="
            INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
    }

    // At this point mid is disconnected from the in-use list so
    // its lock no longer has any effects on the in-use list.
    Atomic::dec(&self->om_in_use_count);
    // Unlock mid, but leave the next value for any lagging list
    // walkers. It will get cleaned up when mid is prepended to
    // the thread's free list:
    om_unlock(mid);
  }

  prepend_to_om_free_list(self, m);
  guarantee(m->is_free(), "invariant");
}

// Return ObjectMonitors on a moribund thread's free and in-use
// lists to the appropriate global lists. The ObjectMonitors on the
// per-thread in-use list may still be in use by other threads.
//
// We currently call om_flush() from Threads::remove() before the
// thread has been excised from the thread list and is no longer a
// mutator. This means that om_flush() cannot run concurrently with
// a safepoint and interleave with deflate_idle_monitors(). In
// particular, this ensures that the thread's in-use monitors are
// scanned by a GC safepoint, either via Thread::oops_do() (before
// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
// om_flush() is called).
//
// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
// run at the same time as om_flush() so we have to follow a careful
// protocol to prevent list corruption.

void ObjectSynchronizer::om_flush(Thread* self) {
  // Process the per-thread in-use list first to be consistent.
  int in_use_count = 0;
  ObjectMonitor* in_use_list = NULL;
  ObjectMonitor* in_use_tail = NULL;
  NoSafepointVerifier nsv;

  // This function can race with a list walker or with an async
  // deflater thread so we lock the list head to prevent confusion.
  // An async deflater thread checks to see if the target thread
  // is exiting, but if it has made it past that check before we
  // started exiting, then it is racing to get to the in-use list.
  if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
    // At this point, we have locked the in-use list head so a racing
    // thread cannot come in after us. However, a racing thread could
    // be ahead of us; we'll detect that and delay to let it finish.
    //
    // The thread is going away, however the ObjectMonitors on the
    // om_in_use_list may still be in-use by other threads. Link
    // them to in_use_tail, which will be linked into the global
    // in-use list (om_list_globals._in_use_list) below.
    //
    // Account for the in-use list head before the loop since it is
    // already locked (by this thread):
    in_use_tail = in_use_list;
    in_use_count++;
    for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
      if (is_locked(cur_om)) {
        // cur_om is locked so there must be a racing walker or async
        // deflater thread ahead of us so we'll give it a chance to finish.
        while (is_locked(cur_om)) {
          os::naked_short_sleep(1);
        }
        // Refetch the possibly changed next field and try again.
        cur_om = unmarked_next(in_use_tail);
        continue;
      }
      if (cur_om->object() == NULL) {
        // cur_om was deflated and the object ref was cleared while it
        // was locked. We happened to see it just after it was unlocked
        // (and added to the free list). Refetch the possibly changed
        // next field and try again.
        cur_om = unmarked_next(in_use_tail);
        continue;
      }
      in_use_tail = cur_om;
      in_use_count++;
      cur_om = unmarked_next(cur_om);
    }
    guarantee(in_use_tail != NULL, "invariant");
    int l_om_in_use_count = Atomic::load(&self->om_in_use_count);
    ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: "
                   "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
    Atomic::store(&self->om_in_use_count, 0);
    // Clear the in-use list head (which also unlocks it):
    Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
    om_unlock(in_use_list);
  }

  int free_count = 0;
  ObjectMonitor* free_list = NULL;
  ObjectMonitor* free_tail = NULL;
  // This function can race with a list walker thread so we lock the
  // list head to prevent confusion.
  if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) {
    // At this point, we have locked the free list head so a racing
    // thread cannot come in after us. However, a racing thread could
    // be ahead of us; we'll detect that and delay to let it finish.
    //
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to om_list_globals._free_list below.
    //
    // Account for the free list head before the loop since it is
    // already locked (by this thread):
    free_tail = free_list;
    free_count++;
    for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) {
      if (is_locked(s)) {
        // s is locked so there must be a racing walker thread ahead
        // of us so we'll give it a chance to finish.
        while (is_locked(s)) {
          os::naked_short_sleep(1);
        }
      }
      free_tail = s;
      free_count++;
      // Monitors on the free list must be fully deflated and idle:
      guarantee(s->object() == NULL, "invariant");
      if (s->is_busy()) {
        stringStream ss;
        fatal("must be !is_busy: %s", s->is_busy_to_string(&ss));
      }
    }
    guarantee(free_tail != NULL, "invariant");
    int l_om_free_count = Atomic::load(&self->om_free_count);
    ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
                   "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
    Atomic::store(&self->om_free_count, 0);
    // Clear the free list head (which also unlocks it):
    Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
    om_unlock(free_list);
  }

  // Hand the captured per-thread lists (if any) over to the globals:
  if (free_tail != NULL) {
    prepend_list_to_global_free_list(free_list, free_tail, free_count);
  }

  if (in_use_tail != NULL) {
    prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
  }

  // Log at Debug always; at Info only when something was flushed.
  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
                 ", in_use_count=%d" ", om_free_provision=%d",
                 p2i(self), free_count, in_use_count, self->om_free_provision);
  }
}

// Post a JFR JavaMonitorInflate event for the given object and cause.
// Caller must have already checked event->should_commit().
static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
// Ensures obj has an inflated monitor: if one already exists just
// sanity-check it, otherwise inflate with cause vm_internal.
void
ObjectSynchronizer::inflate_helper(oop obj) {
  markWord mark = obj->mark();
  if (mark.has_monitor()) {
    ObjectMonitor* monitor = mark.monitor();
    assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor=" INTPTR_FORMAT " is invalid", p2i(monitor));
    markWord dmw = monitor->header();
    assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
    return;
  }
  (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}

// Inflate object's lock to a heavyweight ObjectMonitor and return it.
// Retries (the for(;;) loop) until the object's mark word is observed
// in a state this thread can resolve: already-inflated, stack-locked,
// INFLATING (transient) or neutral.
ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
                                           const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // *  Inflated     - just return
    // *  Stack-locked - coerce it to inflated
    // *  INFLATING    - busy wait for conversion to complete
    // *  Neutral      - aggressively inflate the object.
    // *  BIASED       - Illegal.  We should never see this

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(AsyncDeflateIdleMonitors || inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return inf;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markWord::INFLATING()) {
      read_stable_mark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the objectmonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the objectmonitor, and then finally STed the address of the
    // objectmonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATED appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private objectmonitor free lists.
    // These list are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple objectmonitors en-mass from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the om_alloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in om_alloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
      ObjectMonitor* m = om_alloc(self);
      // Optimistically prepare the objectmonitor - anticipate successful CAS
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;   // Consider: maintain by type/class

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        // om_release() will reset the allocation state from New to Free.
        om_release(self, m, true);
        continue;       // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack BasicLock back into the object header. Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an ObjectMonitor. The inflate() routine must copy the header
      // value from the BasicLock on the owner's stack to the ObjectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the BasicLock to the object)
      // while inflation is in-progress. This protocol avoids races that would
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark.  Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
      // with this thread we could simply set m->_owner = self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS.  That is, we can avoid the xchg-NULL .... ST idiom.
      if (AsyncDeflateIdleMonitors) {
        m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
      } else {
        m->set_owner_from(NULL, mark.locker());
      }
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      // release_set_mark publishes the fully initialized monitor:
      object->release_set_mark(markWord::encode(m));

      // Once ObjectMonitor is configured and the object is associated
      // with the ObjectMonitor, it is safe to allow async deflation:
      assert(m->is_new(), "freshly allocated monitor must be new");
      m->set_allocation_state(ObjectMonitor::Old);

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      return m;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to self.
    // An inflateTry() method that we could call from enter() would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = om_alloc(self);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    if (AsyncDeflateIdleMonitors) {
      // DEFLATER_MARKER is the only non-NULL value we should see here.
      m->try_set_owner_from(DEFLATER_MARKER, NULL);
    }
    m->set_object(object);
    m->_Responsible = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;       // consider: keep metastats by type/class

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      // CAS failed; undo the speculative preparation before releasing m:
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
      // om_release() will reset the allocation state from New to Free.
      om_release(self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    assert(m->is_new(), "freshly allocated monitor must be new");
    m->set_allocation_state(ObjectMonitor::Old);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}


// We maintain a list of in-use monitors for each thread.
//
// For safepoint based deflation:
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see om_flush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in-use could negatively impact performance. We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.
//
// For async deflation:
// If a special deflation request is made, then the safepoint based
// deflation mechanism is used. Otherwise, an async deflation request
// is registered with the ServiceThread and it is notified.

// Safepoint cleanup entry point for monitor deflation. Either runs the
// safepoint-based deflation inline, or hands the work off to the
// ServiceThread for async deflation.
void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // The per-thread in-use lists are handled in
  // ParallelSPCleanupThreadClosure::do_thread().

  if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
    // Use the older mechanism for the global in-use list or if a
    // special deflation has been requested before the safepoint.
    ObjectSynchronizer::deflate_idle_monitors(counters);
    return;
  }

  log_debug(monitorinflation)("requesting async deflation of idle monitors.");
  // Request deflation of idle monitors by the ServiceThread:
  set_is_async_deflation_requested(true);
  MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
  ml.notify_all();

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level and not at a safepoint.
    // For safepoint based deflation, audit_and_print_stats() is called
    // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
    // Debug level at a safepoint.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  }
}

// Deflate a single monitor if not in-use
// Return true if deflated, false if in-use
// Safepoint-only path: the caller guarantees no mutators are running,
// so no locking of the monitor list nodes is needed here. On success the
// deflated mid is appended to the working free list (*free_head_p /
// *free_tail_p); the caller fixes up the in-use list linkage.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** free_head_p,
                                         ObjectMonitor** free_tail_p) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
  const markWord mark = obj->mark();
  guarantee(mark == markWord::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
            markWord::encode(mid).value());
  // Make sure that mark.monitor() and markWord::encode() agree:
  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  const markWord dmw = mid->header();
  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

  if (mid->is_busy()) {
    // Easy checks are first - the ObjectMonitor is busy so no deflation.
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used
    // It's idle - scavenge and return to the global free list
    // plain old deflation ...
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  mark.value(), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    if (AsyncDeflateIdleMonitors) {
      // clear() expects the owner field to be NULL.
      // DEFLATER_MARKER is the only non-NULL value we should see here.
      mid->try_set_owner_from(DEFLATER_MARKER, NULL);
    }
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));
    assert(mid->is_free(), "invariant");

    // Move the deflated ObjectMonitor to the working free list
    // defined by free_head_p and free_tail_p.
    if (*free_head_p == NULL) *free_head_p = mid;
    if (*free_tail_p != NULL) {
      // We append to the list so the caller can use mid->_next_om
      // to fix the linkages in its context.
      ObjectMonitor* prevtail = *free_tail_p;
      // Should have been cleaned up by the caller:
      // Note: Should not have to lock prevtail here since we're at a
      // safepoint and ObjectMonitors on the local free list should
      // not be accessed in parallel.
      // l_next_om is only read by assert(), which compiles away in
      // product builds, hence the #ifdef ASSERT guard.
#ifdef ASSERT
      ObjectMonitor* l_next_om = prevtail->next_om();
#endif
      assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
      prevtail->set_next_om(mid);
    }
    *free_tail_p = mid;
    // At this point, mid->_next_om still refers to its current
    // value and another ObjectMonitor's _next_om field still
    // refers to this ObjectMonitor. Those linkages have to be
    // cleaned up by the caller who has the complete context.
    deflated = true;
  }
  return deflated;
}

// Deflate the specified ObjectMonitor if not in-use using a JavaThread.
// Returns true if it was deflated and false otherwise.
//
// The async deflation protocol sets owner to DEFLATER_MARKER and
// makes contentions negative as signals to contending threads that
// an async deflation is in progress. There are a number of checks
// as part of the protocol to make sure that the calling thread has
// not lost the race to a contending thread.
//
// The ObjectMonitor has been successfully async deflated when:
// (owner == DEFLATER_MARKER && contentions < 0)
// Contending threads that see those values know to retry their operation.
//
bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                  ObjectMonitor** free_head_p,
                                                  ObjectMonitor** free_tail_p) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  // A newly allocated ObjectMonitor should not be seen here so we
  // avoid an endless inflate/deflate cycle.
  assert(mid->is_old(), "must be old: allocation_state=%d",
         (int) mid->allocation_state());

  if (mid->is_busy()) {
    // Easy checks are first - the ObjectMonitor is busy so no deflation.
    return false;
  }

  // Set a NULL owner to DEFLATER_MARKER to force any contending thread
  // through the slow path. This is just the first part of the async
  // deflation dance.
  if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) != NULL) {
    // The owner field is no longer NULL so we lost the race since the
    // ObjectMonitor is now busy.
    return false;
  }

  if (mid->contentions() > 0 || mid->_waiters != 0) {
    // Another thread has raced to enter the ObjectMonitor after
    // mid->is_busy() above or has already entered and waited on
    // it which makes it busy so no deflation. Restore owner to
    // NULL if it is still DEFLATER_MARKER.
    mid->try_set_owner_from(DEFLATER_MARKER, NULL);
    return false;
  }

  // Make a zero contentions field negative to force any contending threads
  // to retry. This is the second part of the async deflation dance.
  if (Atomic::cmpxchg(&mid->_contentions, (jint)0, -max_jint) != 0) {
    // Contentions was no longer 0 so we lost the race since the
    // ObjectMonitor is now busy. Restore owner to NULL if it is
    // still DEFLATER_MARKER:
    mid->try_set_owner_from(DEFLATER_MARKER, NULL);
    return false;
  }

  // If owner is still DEFLATER_MARKER, then we have successfully
  // signaled any contending threads to retry.
  if (!mid->owner_is_DEFLATER_MARKER()) {
    // If it is not, then we have lost the race to an entering thread
    // and the ObjectMonitor is now busy. This is the third and final
    // part of the async deflation dance.
    // Note: This owner check solves the ABA problem with contentions
    // where another thread acquired the ObjectMonitor, finished
    // using it and restored contentions to zero.

    // Add back max_jint to restore the contentions field to its
    // proper value (which may not be what we saw above):
    mid->add_to_contentions(max_jint);

    // l_contentions is only read by assert(), which compiles away in
    // product builds, hence the #ifdef ASSERT guard.
#ifdef ASSERT
    jint l_contentions = mid->contentions();
#endif
    assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
           l_contentions, mid->contentions());
    return false;
  }

  // Sanity checks for the races:
  guarantee(mid->contentions() < 0, "must be negative: contentions=%d",
            mid->contentions());
  guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
  guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
            INTPTR_FORMAT, p2i(mid->_cxq));
  guarantee(mid->_EntryList == NULL,
            "must be no entering threads: EntryList=" INTPTR_FORMAT,
            p2i(mid->_EntryList));

  const oop obj = (oop) mid->object();
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm;
    log_trace(monitorinflation)("deflate_monitor_using_JT: "
                                "object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s'",
                                p2i(obj), obj->mark().value(),
                                obj->klass()->external_name());
  }

  // Install the old mark word if nobody else has already done it.
  mid->install_displaced_markword_in_object(obj);
  mid->clear_common();

  assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
         p2i(mid->object()));
  assert(mid->is_free(), "must be free: allocation_state=%d",
         (int)mid->allocation_state());

  // Move the deflated ObjectMonitor to the working free list
  // defined by free_head_p and free_tail_p.
  if (*free_head_p == NULL) {
    // First one on the list.
    *free_head_p = mid;
  }
  if (*free_tail_p != NULL) {
    // We append to the list so the caller can use mid->_next_om
    // to fix the linkages in its context.
    ObjectMonitor* prevtail = *free_tail_p;
    // prevtail should have been cleaned up by the caller:
    // l_next_om is only read by assert(), which compiles away in
    // product builds, hence the #ifdef ASSERT guard.
#ifdef ASSERT
    ObjectMonitor* l_next_om = unmarked_next(prevtail);
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    // Unlike the safepoint path in deflate_monitor(), mutators are
    // running, so prevtail must be locked before its next field is set:
    om_lock(prevtail);
    prevtail->set_next_om(mid);  // prevtail now points to mid (and is unlocked)
  }
  *free_tail_p = mid;

  // At this point, mid->_next_om still refers to its current
  // value and another ObjectMonitor's _next_om field still
  // refers to this ObjectMonitor. Those linkages have to be
  // cleaned up by the caller who has the complete context.

  // We leave owner == DEFLATER_MARKER and contentions < 0
  // to force any racing threads to retry.
  return true;  // Success, ObjectMonitor has been deflated.
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
//
// In the case of parallel processing of thread local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoid conflicts that would arise when worker threads would
// process the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
//
// Returns the number of monitors deflated; decrements *count_p for
// each one and appends them to the *free_head_p/*free_tail_p working
// free list.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
                                             int* count_p,
                                             ObjectMonitor** free_head_p,
                                             ObjectMonitor** free_tail_p) {
  ObjectMonitor* cur_mid_in_use = NULL;
  ObjectMonitor* mid = NULL;
  ObjectMonitor* next = NULL;
  int deflated_count = 0;

  // This list walk executes at a safepoint and does not race with any
  // other list walkers.

  for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
    next = unmarked_next(mid);
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (cur_mid_in_use == NULL) {
        // mid is the list head so switch the list head to next:
        Atomic::store(list_p, next);
      } else {
        // Switch cur_mid_in_use's next field to next:
        cur_mid_in_use->set_next_om(next);
      }
      // At this point mid is disconnected from the in-use list.
      deflated_count++;
      Atomic::dec(count_p);
      // mid is current tail in the free_head_p list so NULL terminate it:
      mid->set_next_om(NULL);
    } else {
      cur_mid_in_use = mid;
    }
  }
  return deflated_count;
}

// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
// a JavaThread. Returns the number of deflated ObjectMonitors. The given
// list could be a per-thread in-use list or the global in-use list.
// If a safepoint has started, then we save state via saved_mid_in_use_p
// and return to the caller to honor the safepoint.
//
// list_p/count_p: the in-use list to walk and its element count.
// free_head_p/free_tail_p: local singly-linked list that accumulates
//   the deflated ObjectMonitors.
// saved_mid_in_use_p: in/out resume cursor; non-NULL on return means a
//   safepoint interrupted the walk and the caller should loop around.
int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
                                                      int* count_p,
                                                      ObjectMonitor** free_head_p,
                                                      ObjectMonitor** free_tail_p,
                                                      ObjectMonitor** saved_mid_in_use_p) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  JavaThread* self = JavaThread::current();

  ObjectMonitor* cur_mid_in_use = NULL;
  ObjectMonitor* mid = NULL;
  ObjectMonitor* next = NULL;
  ObjectMonitor* next_next = NULL;
  int deflated_count = 0;
  NoSafepointVerifier nsv;

  // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
  // protocol because om_release() can do list deletions in parallel;
  // this also prevents races with a list walker thread. We also
  // lock-next-next-as-we-go to prevent an om_flush() that is behind
  // this thread from passing us.
  if (*saved_mid_in_use_p == NULL) {
    // No saved state so start at the beginning.
    // Lock the list head so we can possibly deflate it:
    if ((mid = get_list_head_locked(list_p)) == NULL) {
      return 0;  // The list is empty so nothing to deflate.
    }
    next = unmarked_next(mid);
  } else {
    // We're restarting after a safepoint so restore the necessary state
    // before we resume.
    cur_mid_in_use = *saved_mid_in_use_p;
    // Lock cur_mid_in_use so we can possibly update its
    // next field to extract a deflated ObjectMonitor.
    om_lock(cur_mid_in_use);
    mid = unmarked_next(cur_mid_in_use);
    if (mid == NULL) {
      om_unlock(cur_mid_in_use);
      *saved_mid_in_use_p = NULL;
      return 0;  // The remainder is empty so nothing more to deflate.
    }
    // Lock mid so we can possibly deflate it:
    om_lock(mid);
    next = unmarked_next(mid);
  }

  while (true) {
    // The current mid is locked at this point. If we have a
    // cur_mid_in_use, then it is also locked at this point.

    if (next != NULL) {
      // We lock next so that an om_flush() thread that is behind us
      // cannot pass us when we unlock the current mid.
      om_lock(next);
      next_next = unmarked_next(next);
    }

    // Only try to deflate if there is an associated Java object and if
    // mid is old (is not newly allocated and is not newly freed).
    if (mid->object() != NULL && mid->is_old() &&
        deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (cur_mid_in_use == NULL) {
        // mid is the list head and it is locked. Switch the list head
        // to next which is also locked (if not NULL) and also leave
        // mid locked:
        Atomic::store(list_p, next);
      } else {
        ObjectMonitor* locked_next = mark_om_ptr(next);
        // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
        // next field to locked_next and also leave mid locked:
        cur_mid_in_use->set_next_om(locked_next);
      }
      // At this point mid is disconnected from the in-use list so
      // its lock no longer has any effect on the in-use list.
      deflated_count++;
      Atomic::dec(count_p);
      // mid is current tail in the free_head_p list so NULL terminate it
      // (which also unlocks it):
      mid->set_next_om(NULL);

      // All the list management is done so move on to the next one:
      mid = next;  // mid keeps non-NULL next's locked state
      next = next_next;
    } else {
      // mid is considered in-use if it does not have an associated
      // Java object or mid is not old or deflation did not succeed.
      // A mid->is_new() node can be seen here when it is freshly
      // returned by om_alloc() (and skips the deflation code path).
      // A mid->is_old() node can be seen here when deflation failed.
      // A mid->is_free() node can be seen here when a fresh node from
      // om_alloc() is released by om_release() due to losing the race
      // in inflate().

      // All the list management is done so move on to the next one:
      if (cur_mid_in_use != NULL) {
        om_unlock(cur_mid_in_use);
      }
      // The next cur_mid_in_use keeps mid's lock state so
      // that it is stable for a possible next field change. It
      // cannot be modified by om_release() while it is locked.
      cur_mid_in_use = mid;
      mid = next;  // mid keeps non-NULL next's locked state
      next = next_next;

      if (SafepointMechanism::should_block(self) &&
          cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller before blocking.
        *saved_mid_in_use_p = cur_mid_in_use;
        om_unlock(cur_mid_in_use);
        if (mid != NULL) {
          om_unlock(mid);
        }
        return deflated_count;
      }
    }
    if (mid == NULL) {
      if (cur_mid_in_use != NULL) {
        om_unlock(cur_mid_in_use);
      }
      break;  // Reached end of the list so nothing more to deflate.
    }

    // The current mid's next field is locked at this point. If we have
    // a cur_mid_in_use, then it is also locked at this point.
  }
  // We finished the list without a safepoint starting so there's
  // no need to save state.
  *saved_mid_in_use_p = NULL;
  return deflated_count;
}

// Reset all the counters that deflate_idle_monitors() and
// deflate_thread_local_monitors() accumulate into during one
// safepoint cleanup pass.
void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->n_in_use = 0;              // currently associated with objects
  counters->n_in_circulation = 0;      // extant
  counters->n_scavenged = 0;           // reclaimed (global and per-thread)
  counters->per_thread_scavenged = 0;  // per-thread scavenge total
  counters->per_thread_times = 0.0;    // per-thread scavenge times
}

// Safepoint-time deflation of the global in-use list. With
// AsyncDeflateIdleMonitors this is a no-op unless a special
// deflation has been requested.
void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    // Nothing to do when global idle ObjectMonitors are deflated using
    // a JavaThread unless a special deflation has been requested.
    if (!is_special_deflation_requested()) {
      return;
    }
  }

  // NOTE(review): 'deflated' appears to be unused in the remainder of
  // this function — candidate for removal.
  bool deflated = false;

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Note: the thread-local monitors lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan om_list_globals._in_use_list
  int deflated_count = 0;
  if (Atomic::load(&om_list_globals._in_use_list) != NULL) {
    // Update n_in_circulation before om_list_globals._in_use_count is
    // updated by deflation.
// (continuation of ObjectSynchronizer::deflate_idle_monitors())
    Atomic::add(&counters->n_in_circulation,
                Atomic::load(&om_list_globals._in_use_count));

    deflated_count = deflate_monitor_list(&om_list_globals._in_use_list,
                                          &om_list_globals._in_use_count,
                                          &free_head_p, &free_tail_p);
    Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count));
  }

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
#ifdef ASSERT
    ObjectMonitor* l_next_om = free_tail_p->next_om();
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
  }
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

// Empty per-thread closure: pushing every JavaThread through a
// handshake (or safepoint) guarantees that no thread can still be
// referencing an ObjectMonitor that was parked on the global wait list.
class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};

// Async deflation driver: deflates global idle monitors, then each
// non-exiting JavaThread's idle monitors, then (after a handshake)
// moves wait-listed monitors to the global free list.
void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");

  // Deflate any global idle monitors.
  deflate_global_idle_monitors_using_JT();

  int count = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    if (Atomic::load(&jt->om_in_use_count) > 0 && !jt->is_exiting()) {
      // This JavaThread is using ObjectMonitors so deflate any that
      // are idle unless this JavaThread is exiting; do not race with
      // ObjectSynchronizer::om_flush().
      deflate_per_thread_idle_monitors_using_JT(jt);
      count++;
    }
  }
  if (count > 0) {
    log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
  }

  log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
                             "global_free_count=%d, global_wait_count=%d",
                             Atomic::load(&om_list_globals._population),
                             Atomic::load(&om_list_globals._in_use_count),
                             Atomic::load(&om_list_globals._free_count),
                             Atomic::load(&om_list_globals._wait_count));

  // The ServiceThread's async deflation request has been processed.
  set_is_async_deflation_requested(false);

  if (Atomic::load(&om_list_globals._wait_count) > 0) {
    // There are deflated ObjectMonitors waiting for a handshake
    // (or a safepoint) for safety.

    ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list);
    ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL");
    // NOTE: this 'count' shadows the thread-loop 'count' declared above.
    int count = Atomic::load(&om_list_globals._wait_count);
    Atomic::store(&om_list_globals._wait_count, 0);
    Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);

    // Find the tail for prepend_list_to_common(). No need to mark
    // ObjectMonitors for this list walk since only the deflater
    // thread manages the wait list.
    int l_count = 0;
    ObjectMonitor* tail = NULL;
    for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
      tail = n;
      l_count++;
    }
    ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);

    // Will execute a safepoint if !ThreadLocalHandshakes:
    HandshakeForDeflation hfd_hc;
    Handshake::execute(&hfd_hc);

    prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
                           &om_list_globals._free_count);

    log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
  }
}

// Deflate global idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread* self = JavaThread::current();

  deflate_common_idle_monitors_using_JT(true /* is_global */, self);
}

// Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");

  deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
}

// Deflate global or per-thread idle ObjectMonitors using a JavaThread.
//
// is_global: selects the global in-use list vs target's in-use list.
// target: the JavaThread whose list is walked when !is_global (for the
//   global case the caller passes the current thread).
// Loops until deflate_monitor_list_using_JT() finishes a full pass
// without being interrupted by a safepoint; deflated monitors are
// published to the global wait list (not the free list) so they stay
// quarantined until the next handshake/safepoint.
void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
  JavaThread* self = JavaThread::current();

  int deflated_count = 0;
  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor* free_tail_p = NULL;
  ObjectMonitor* saved_mid_in_use_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  if (is_global) {
    OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count)));
  } else {
    OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count)));
  }

  do {
    if (saved_mid_in_use_p != NULL) {
      // We looped around because deflate_monitor_list_using_JT()
      // detected a pending safepoint. Honoring the safepoint is good,
      // but as long as is_special_deflation_requested() is supported,
      // we can't safely restart using saved_mid_in_use_p. That saved
      // ObjectMonitor could have been deflated by safepoint based
      // deflation and would no longer be on the in-use list where we
      // originally found it.
      saved_mid_in_use_p = NULL;
    }
    int local_deflated_count;
    if (is_global) {
      local_deflated_count =
          deflate_monitor_list_using_JT(&om_list_globals._in_use_list,
                                        &om_list_globals._in_use_count,
                                        &free_head_p, &free_tail_p,
                                        &saved_mid_in_use_p);
    } else {
      local_deflated_count =
          deflate_monitor_list_using_JT(&target->om_in_use_list,
                                        &target->om_in_use_count, &free_head_p,
                                        &free_tail_p, &saved_mid_in_use_p);
    }
    deflated_count += local_deflated_count;

    if (free_head_p != NULL) {
      // Move the deflated ObjectMonitors to the global free list.
      guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
      // Note: The target thread can be doing an om_alloc() that
      // is trying to prepend an ObjectMonitor on its in-use list
      // at the same time that we have deflated the current in-use
      // list head and put it on the local free list. prepend_to_common()
      // will detect the race and retry which avoids list corruption,
      // but the next field in free_tail_p can flicker to marked
      // and then unmarked while prepend_to_common() is sorting it
      // all out.
#ifdef ASSERT
      ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
#endif
      assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));

      prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);

      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
    }

    if (saved_mid_in_use_p != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      timer.stop();
      {
        if (is_global) {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        } else {
          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
        }
        assert(SafepointMechanism::should_block(self), "sanity check");
        // Blocking in the _thread_blocked state lets the safepoint
        // proceed; we resume the walk afterwards.
        ThreadBlockInVM blocker(self);
      }
      // Prepare for another loop after the safepoint.
      free_head_p = NULL;
      free_tail_p = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
    }
  } while (saved_mid_in_use_p != NULL);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    if (is_global) {
      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
    } else {
      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
    }
  }
}

// Post-cleanup accounting and logging for a safepoint deflation pass;
// counterpart of prepare_deflate_idle_monitors().
void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);

  bool needs_special_deflation = is_special_deflation_requested();
  if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
    // Nothing to do when idle ObjectMonitors are deflated using
    // a JavaThread unless a special deflation has been requested.
    return;
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level and not at a safepoint.
// (continuation of ObjectSynchronizer::finish_deflate_idle_monitors())
    // For async deflation, audit_and_print_stats() is called in
    // ObjectSynchronizer::do_safepoint_work() at the Debug level
    // at a safepoint.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
                               "global_free_count=%d, global_wait_count=%d",
                               Atomic::load(&om_list_globals._population),
                               Atomic::load(&om_list_globals._in_use_count),
                               Atomic::load(&om_list_globals._free_count),
                               Atomic::load(&om_list_globals._wait_count));
  }

  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));

  // Refresh the "random" values used by the hash code machinery so the
  // next cycle sees different values.
  GVars.stw_random = os::random();
  GVars.stw_cycle++;

  if (needs_special_deflation) {
    set_is_special_deflation_requested(false);  // special deflation is done
  }
}

// Safepoint-time deflation of one JavaThread's in-use list; results are
// accumulated into 'counters'. With AsyncDeflateIdleMonitors this is a
// no-op unless a special deflation has been requested.
void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
    // Nothing to do if a special deflation has NOT been requested.
    return;
  }

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Update n_in_circulation before om_in_use_count is updated by deflation.
  Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));

  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
  Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
#ifdef ASSERT
    ObjectMonitor* l_next_om = free_tail_p->next_om();
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
    Atomic::add(&counters->per_thread_scavenged, deflated_count);
  }

  timer.stop();
  counters->per_thread_times += timer.seconds();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;  // HotSpot macro: declares the THREAD member used below

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  // Fully exit (including recursions) any monitor owned by THREAD.
  // Exceptions raised by complete_exit() are left pending for the
  // caller (release_monitors_owned_by_thread) to clear.
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  ObjectSynchronizer::monitors_iterate(&rjmc);
  // Swallow any exception raised while exiting the monitors (see the
  // contract in the comment above).
  THREAD->clear_pending_exception();
}

// Map an InflateCause enum value to a human-readable name (used for
// logging/event reporting).
const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

// Accessors exposing the raw address/size of the shared GVars block and
// of selected fields within it.
u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// This function can be called at a safepoint or it can be called when
// we are trying to exit the VM. When we are trying to exit the VM, the
// list walker functions can run in parallel with the other list
// operations so spin-locking is used for safety.
//
// Calls to this function can be added in various places as a debugging
// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
// details logged at the Info level and 'false' for the 'on_exit'
// parameter to have in-use monitor details logged at the Trace level.
// deflate_monitor_list() no longer uses spin-locking so be careful
// when adding audit_and_print_stats() calls at a safepoint.
//
// Any inconsistency found in the monitor lists bumps error_cnt and the
// function guarantee()s (aborts the VM) if error_cnt != 0 at the end.
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  // Pick the most verbose enabled level.
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check om_list_globals._population:
  if (Atomic::load(&om_list_globals._population) == chk_om_population) {
    ls->print_cr("global_population=%d equals chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  } else {
    // With fine grained locks on the monitor lists, it is possible for
    // log_monitor_list_counts() to return a value that doesn't match
    // om_list_globals._population. So far a higher value has been
    // seen in testing so something is being double counted by
    // log_monitor_list_counts().
    ls->print_cr("WARNING: global_population=%d is not equal to "
                 "chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  }

  // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check om_list_globals._free_list and om_list_globals._free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  // Check om_list_globals._wait_list and om_list_globals._wait_count:
  chk_global_wait_list_and_count(ls, &error_cnt);

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
// jt is NULL when checking a global-list entry; *error_cnt_p is bumped
// for each violated invariant. A free monitor must be idle: not busy,
// NULL _header (modulo async deflation) and NULL _object.
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    } else if (!AsyncDeflateIdleMonitors) {
      // NOTE(review): the global-list _header check is skipped when
      // AsyncDeflateIdleMonitors is on — presumably an async-deflated
      // free monitor may still carry a header; confirm before changing.
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Lock the next ObjectMonitor for traversal and unlock the current
// ObjectMonitor. Returns the next ObjectMonitor if there is one.
// Otherwise returns NULL (after unlocking the current ObjectMonitor).
// This function is used by the various list walker functions to
// safely walk a list without allowing an ObjectMonitor to be moved
// to another list in the middle of a walk. Precondition: 'cur' is
// already locked (marked) by the caller.
static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
  assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
  ObjectMonitor* next = unmarked_next(cur);
  if (next == NULL) {  // Reached the end of the list.
    om_unlock(cur);
    return NULL;
  }
  om_lock(next);   // Lock next before unlocking current to keep
  om_unlock(cur);  // from being by-passed by another thread.
  return next;
}

// Check the global free list and count; log the results of the checks.
// A count mismatch is only a WARNING here (see comment below); real
// per-entry invariant violations bump *error_cnt_p via chk_free_entry().
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._free_list)) != NULL) {
    // Marked the global free list head so process the list.
    while (true) {
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  if (l_free_count == chk_om_free_count) {
    out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
                  l_free_count, chk_om_free_count);
  } else {
    // With fine grained locks on om_list_globals._free_list, it
    // is possible for an ObjectMonitor to be prepended to
    // om_list_globals._free_list after we started calculating
    // chk_om_free_count so om_list_globals._free_count may not
    // match anymore.
    out->print_cr("WARNING: global_free_count=%d is not equal to "
                  "chk_om_free_count=%d", l_free_count, chk_om_free_count);
  }
}

// Check the global wait list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_wait_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._wait_list)) != NULL) {
    // Marked the global wait list head so process the list.
    while (true) {
      // Rules for om_list_globals._wait_list are the same as for
      // om_list_globals._free_list:
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_wait_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  if (Atomic::load(&om_list_globals._wait_count) == chk_om_wait_count) {
    out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
  } else {
    // Unlike the free/in-use lists, only the deflater thread manages
    // the wait list, so a count mismatch here is a hard ERROR rather
    // than a racy WARNING.
    out->print_cr("ERROR: global_wait_count=%d is not equal to "
                  "chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
    // Marked the global in-use list head so process the list.
    while (true) {
      chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  if (l_in_use_count == chk_om_in_use_count) {
    out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  } else {
    // With fine grained locks on the monitor lists, it is possible for
    // an exiting JavaThread to put its in-use ObjectMonitors on the
    // global in-use list after chk_om_in_use_count is calculated above.
    out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  }
}

// Check an in-use monitor entry; log any errors.
// jt is NULL when checking a global-list entry. An in-use monitor must
// have a non-NULL _header and _object, and its object's mark must point
// back at this same monitor.
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  // NOTE(review): if n->object() was NULL the error was counted above,
  // but obj->mark() below still dereferences the NULL oop; likewise
  // mark.monitor() is called even when !mark.has_monitor(). Consider an
  // early return after each of those errors so the checker itself
  // cannot crash on a corrupt entry.
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
    // Marked the per-thread free list head so process the list.
3188 while (true) { 3189 chk_free_entry(jt, cur, out, error_cnt_p); 3190 chk_om_free_count++; 3191 3192 cur = lock_next_for_traversal(cur); 3193 if (cur == NULL) { 3194 break; 3195 } 3196 } 3197 } 3198 int l_om_free_count = Atomic::load(&jt->om_free_count); 3199 if (l_om_free_count == chk_om_free_count) { 3200 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals " 3201 "chk_om_free_count=%d", p2i(jt), l_om_free_count, chk_om_free_count); 3202 } else { 3203 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not " 3204 "equal to chk_om_free_count=%d", p2i(jt), l_om_free_count, 3205 chk_om_free_count); 3206 *error_cnt_p = *error_cnt_p + 1; 3207 } 3208 } 3209 3210 // Check the thread's in-use list and count; log the results of the checks. 3211 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt, 3212 outputStream * out, 3213 int *error_cnt_p) { 3214 int chk_om_in_use_count = 0; 3215 ObjectMonitor* cur = NULL; 3216 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) { 3217 // Marked the per-thread in-use list head so process the list. 3218 while (true) { 3219 chk_in_use_entry(jt, cur, out, error_cnt_p); 3220 chk_om_in_use_count++; 3221 3222 cur = lock_next_for_traversal(cur); 3223 if (cur == NULL) { 3224 break; 3225 } 3226 } 3227 } 3228 int l_om_in_use_count = Atomic::load(&jt->om_in_use_count); 3229 if (l_om_in_use_count == chk_om_in_use_count) { 3230 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals " 3231 "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count, 3232 chk_om_in_use_count); 3233 } else { 3234 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not " 3235 "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count, 3236 chk_om_in_use_count); 3237 *error_cnt_p = *error_cnt_p + 1; 3238 } 3239 } 3240 3241 // Log details about ObjectMonitors on the in-use lists. 
The 'BHL' 3242 // flags indicate why the entry is in-use, 'object' and 'object type' 3243 // indicate the associated object and its type. 3244 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) { 3245 stringStream ss; 3246 if (Atomic::load(&om_list_globals._in_use_count) > 0) { 3247 out->print_cr("In-use global monitor info:"); 3248 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); 3249 out->print_cr("%18s %s %18s %18s", 3250 "monitor", "BHL", "object", "object type"); 3251 out->print_cr("================== === ================== =================="); 3252 ObjectMonitor* cur = NULL; 3253 if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) { 3254 // Marked the global in-use list head so process the list. 3255 while (true) { 3256 const oop obj = (oop) cur->object(); 3257 const markWord mark = cur->header(); 3258 ResourceMark rm; 3259 out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(cur), 3260 cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL, 3261 p2i(obj), obj->klass()->external_name()); 3262 if (cur->is_busy() != 0) { 3263 out->print(" (%s)", cur->is_busy_to_string(&ss)); 3264 ss.reset(); 3265 } 3266 out->cr(); 3267 3268 cur = lock_next_for_traversal(cur); 3269 if (cur == NULL) { 3270 break; 3271 } 3272 } 3273 } 3274 } 3275 3276 out->print_cr("In-use per-thread monitor info:"); 3277 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); 3278 out->print_cr("%18s %18s %s %18s %18s", 3279 "jt", "monitor", "BHL", "object", "object type"); 3280 out->print_cr("================== ================== === ================== =================="); 3281 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 3282 ObjectMonitor* cur = NULL; 3283 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) { 3284 // Marked the global in-use list head so process the list. 
3285 while (true) { 3286 const oop obj = (oop) cur->object(); 3287 const markWord mark = cur->header(); 3288 ResourceMark rm; 3289 out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT 3290 " %s", p2i(jt), p2i(cur), cur->is_busy() != 0, 3291 mark.hash() != 0, cur->owner() != NULL, p2i(obj), 3292 obj->klass()->external_name()); 3293 if (cur->is_busy() != 0) { 3294 out->print(" (%s)", cur->is_busy_to_string(&ss)); 3295 ss.reset(); 3296 } 3297 out->cr(); 3298 3299 cur = lock_next_for_traversal(cur); 3300 if (cur == NULL) { 3301 break; 3302 } 3303 } 3304 } 3305 } 3306 3307 out->flush(); 3308 } 3309 3310 // Log counts for the global and per-thread monitor lists and return 3311 // the population count. 3312 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) { 3313 int pop_count = 0; 3314 out->print_cr("%18s %10s %10s %10s %10s", 3315 "Global Lists:", "InUse", "Free", "Wait", "Total"); 3316 out->print_cr("================== ========== ========== ========== =========="); 3317 int l_in_use_count = Atomic::load(&om_list_globals._in_use_count); 3318 int l_free_count = Atomic::load(&om_list_globals._free_count); 3319 int l_wait_count = Atomic::load(&om_list_globals._wait_count); 3320 out->print_cr("%18s %10d %10d %10d %10d", "", l_in_use_count, 3321 l_free_count, l_wait_count, 3322 Atomic::load(&om_list_globals._population)); 3323 pop_count += l_in_use_count + l_free_count + l_wait_count; 3324 3325 out->print_cr("%18s %10s %10s %10s", 3326 "Per-Thread Lists:", "InUse", "Free", "Provision"); 3327 out->print_cr("================== ========== ========== =========="); 3328 3329 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 3330 int l_om_in_use_count = Atomic::load(&jt->om_in_use_count); 3331 int l_om_free_count = Atomic::load(&jt->om_free_count); 3332 out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt), 3333 l_om_in_use_count, l_om_free_count, jt->om_free_provision); 3334 pop_count += l_om_in_use_count + 
l_om_free_count; 3335 } 3336 return pop_count; 3337 } 3338 3339 #ifndef PRODUCT 3340 3341 // Check if monitor belongs to the monitor cache 3342 // The list is grow-only so it's *relatively* safe to traverse 3343 // the list of extant blocks without taking a lock. 3344 3345 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) { 3346 PaddedObjectMonitor* block = Atomic::load(&g_block_list); 3347 while (block != NULL) { 3348 assert(block->object() == CHAINMARKER, "must be a block header"); 3349 if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) { 3350 address mon = (address)monitor; 3351 address blk = (address)block; 3352 size_t diff = mon - blk; 3353 assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned"); 3354 return 1; 3355 } 3356 // unmarked_next() is not needed with g_block_list (no locking 3357 // used with block linkage _next_om fields). 3358 block = (PaddedObjectMonitor*)block->next_om(); 3359 } 3360 return 0; 3361 } 3362 3363 #endif