/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations.  See i486.ad fast_lock(),
// for instance.  If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked.  assert() accordingly.

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

struct ObjectMonitorListGlobals {
  char         _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared list related variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.

  // Global ObjectMonitor free list. Newly allocated and deflated
  // ObjectMonitors are prepended here.
  ObjectMonitor* _free_list;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* _in_use_list;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor wait list. Deflated ObjectMonitors wait on
  // this list until after a handshake or a safepoint for platforms
  // that don't support handshakes. After the handshake or safepoint,
  // the deflated ObjectMonitors are prepended to free_list.
  ObjectMonitor* _wait_list;
  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  int _free_count;    // # on free_list
  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));

  int _in_use_count;  // # on in_use_list
  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));

  int _population;    // # Extant -- in circulation
  DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));

  int _wait_count;    // # on wait_list
  DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
};
static ObjectMonitorListGlobals om_list_globals;

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Spin-lock functions

// ObjectMonitors are not lockable outside of this file. We use spin-locks
// implemented using a bit in the _next_om field instead of the heavier
// weight locking mechanisms for faster list management.

#define OM_LOCK_BIT 0x1

// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
static bool is_locked(ObjectMonitor* om) {
  return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
}

// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}

// Return the unmarked next field in an ObjectMonitor. Note: the next
// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
}

// Try to lock an ObjectMonitor. Returns true if locking was successful.
// Otherwise returns false.
static bool try_om_lock(ObjectMonitor* om) {
  // Get current next field without any OM_LOCK_BIT value.
  ObjectMonitor* next = unmarked_next(om);
  if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
    return false;  // Cannot lock the ObjectMonitor.
  }
  return true;
}

// Lock an ObjectMonitor.
static void om_lock(ObjectMonitor* om) {
  while (true) {
    if (try_om_lock(om)) {
      return;
    }
  }
}

// Unlock an ObjectMonitor.
static void om_unlock(ObjectMonitor* om) {
  ObjectMonitor* next = om->next_om();
  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
  om->set_next_om(next);
}

// Get the list head after locking it. Returns the list head or NULL
// if the list is empty.
static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
  while (true) {
    ObjectMonitor* mid = Atomic::load(list_p);
    if (mid == NULL) {
      return NULL;  // The list is empty.
    }
    if (try_om_lock(mid)) {
      if (Atomic::load(list_p) != mid) {
        // The list head changed before we could lock it so we have to retry.
        om_unlock(mid);
        continue;
      }
      return mid;
    }
  }
}

#undef OM_LOCK_BIT
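
// Illustrative sketch (comment only, not compiled) of the lock-bit
// protocol above. Assuming a node N whose unmarked successor is S:
//
//   N->_next_om == S                   // unlocked; S is a clean pointer
//   try_om_lock(N):
//     cmpxchg(&N->_next_om, S, S|0x1)  // set OM_LOCK_BIT to lock N
//   N->_next_om == S|0x1               // locked; unmarked_next(N) == S
//   om_unlock(N):
//     N->_next_om = S                  // clear OM_LOCK_BIT
//
// Stealing the low bit is safe because ObjectMonitors are allocated
// cache-line aligned (see om_alloc() below), so the low bit of a real
// ObjectMonitor* is always zero.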


// =====================> List Management functions

// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p.
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                   int count, ObjectMonitor** list_p,
                                   int* count_p) {
  while (true) {
    ObjectMonitor* cur = Atomic::load(list_p);
    // Prepend list to *list_p.
    if (!try_om_lock(tail)) {
      // Failed to lock tail due to a list walker so try it all again.
      continue;
    }
    tail->set_next_om(cur);  // tail now points to cur (and unlocks tail)
    if (cur == NULL) {
      // No potential race with takers or other prependers since
      // *list_p is empty.
      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
        // Successfully switched *list_p to the list value.
        Atomic::add(count_p, count);
        break;
      }
      // Implied else: try it all again
    } else {
      if (!try_om_lock(cur)) {
        continue;  // failed to lock cur so try it all again
      }
      // We locked cur so try to switch *list_p to the list value.
      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
        // The list head has changed so unlock cur and try again:
        om_unlock(cur);
        continue;
      }
      Atomic::add(count_p, count);
      om_unlock(cur);
      break;
    }
  }
}

// Prepend a newly allocated block of ObjectMonitors to g_block_list and
// om_list_globals._free_list. Also updates om_list_globals._population
// and om_list_globals._free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
  // First we handle g_block_list:
  while (true) {
    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
    // Prepend new_blk to g_block_list. The first ObjectMonitor in
    // a block is reserved for use as linkage to the next block.
    new_blk[0].set_next_om(cur);
    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
      // Successfully switched g_block_list to the new_blk value.
      Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
      break;
    }
    // Implied else: try it all again
  }

  // Second we handle om_list_globals._free_list:
  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
                         &om_list_globals._free_list, &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._free_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._free_count.
static void prepend_list_to_global_free_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
                         &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._wait_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._wait_count.
static void prepend_list_to_global_wait_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
                         &om_list_globals._wait_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._in_use_count.
static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
                                               ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
                         &om_list_globals._in_use_count);
}

// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter.
static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
                              int* count_p) {
  while (true) {
    om_lock(m);  // Lock m so we can safely update its next field.
    ObjectMonitor* cur = NULL;
    // Lock the list head to guard against races with a list walker
    // or async deflater thread (which only races in om_in_use_list):
    if ((cur = get_list_head_locked(list_p)) != NULL) {
      // List head is now locked so we can safely switch it.
      m->set_next_om(cur);  // m now points to cur (and unlocks m)
      Atomic::store(list_p, m);  // Switch list head to unlocked m.
      om_unlock(cur);
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
    m->set_next_om(cur);  // m now points to NULL (and unlocks m)
    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
      // List head is now unlocked m.
      break;
    }
    // Implied else: try it all again
  }
  Atomic::inc(count_p);
}

// Prepend an ObjectMonitor to a per-thread om_free_list.
// Also updates the per-thread om_free_count.
static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
}

// Prepend an ObjectMonitor to a per-thread om_in_use_list.
// Also updates the per-thread om_in_use_count.
static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}

// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
                                                int* count_p) {
  ObjectMonitor* take = NULL;
  // Lock the list head to guard against races with a list walker
  // or async deflater thread (which only races in om_list_globals._free_list):
  if ((take = get_list_head_locked(list_p)) == NULL) {
    return NULL;  // None are available.
  }
  ObjectMonitor* next = unmarked_next(take);
  // Switch locked list head to next (which unlocks the list head, but
  // leaves take locked):
  Atomic::store(list_p, next);
  Atomic::dec(count_p);
  // Unlock take, but leave the next value for any lagging list
  // walkers. It will get cleaned up when take is prepended to
  // the in-use list:
  om_unlock(take);
  return take;
}

// Take an ObjectMonitor from the start of the om_list_globals._free_list.
// Also updates om_list_globals._free_count. Returns NULL if none are
// available.
static ObjectMonitor* take_from_start_of_global_free_list() {
  return take_from_start_of_common(&om_list_globals._free_list,
                                   &om_list_globals._free_count);
}

// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
}
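
// Illustrative sketch (comment only, not compiled) of the allocation
// round-trip built from the helpers above; this is the shape om_alloc()
// below actually uses:
//
//   ObjectMonitor* m = take_from_start_of_om_free_list(self);  // local first
//   if (m == NULL) {
//     m = take_from_start_of_global_free_list();               // then global
//   }
//   if (m != NULL) {
//     prepend_to_om_in_use_list(self, m);  // now visible to list walkers
//   }
//
// Note that take_from_start_of_common() unlocks the taken node but leaves
// its stale next value in place for lagging walkers; the stale value is
// only rewritten when the node is prepended to another list.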


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    if (AsyncDeflateIdleMonitors) {
      // An async deflation can race us before we manage to make the
      // ObjectMonitor busy by setting the owner below. If we detect
      // that race we just bail out to the slow-path here.
      if (m->object() == NULL) {
        return false;
      }
    } else {
      assert(m->object() == obj, "invariant");
    }
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(THREAD)) {
      return;
    }
  }
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, object, inflate_cause_vm_internal);
  monitor->exit(true, THREAD);
}
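
// Illustrative summary (comment only) of the mark word states that
// enter()/exit() above navigate, assuming the usual HotSpot encoding:
//
//   neutral:      [hash | age | 0 | 01]        -- unlocked; CAS installs a
//                                                 pointer to a BasicLock
//   stack-locked: [ptr to BasicLock | 00]      -- owner's stack holds the
//                                                 displaced neutral mark
//   INFLATING:    0                            -- transient, see inflate()
//   inflated:     [ptr to ObjectMonitor | 10]
//
// A recursive stack-lock is recorded by a zero displaced header in the
// nested BasicLock (the dhw.value() == 0 case above), not by a counter
// in the mark word itself.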

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = monitor->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // An async deflation can race after the inflate() call and before
  // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
  // enter() returns false if we have lost the race to async deflation
  // and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
    if (monitor->reenter(recursions, THREAD)) {
      return;
    }
  }
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_jni_enter);
    if (monitor->enter(THREAD)) {
      break;
    }
  }
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
  monitor->wait(millis, false, THREAD);
}
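
// Illustrative sketch (comment only) of how wait() above is typically
// reached, e.g. from the JVM_MonitorWait() entry point for Object.wait().
// Simplified; the real entry point also posts JVMTI/JFR events:
//
//   JVM_ENTRY(void, JVM_MonitorWait(JNIEnv* env, jobject handle, jlong ms))
//     Handle obj(THREAD, JNIHandles::resolve_non_null(handle));
//     ObjectSynchronizer::wait(obj, ms, CHECK);
//   JVM_END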

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    // Still stack-locked by the caller so there cannot be any waiters.
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notify(THREAD);
}

// NOTE: see comment in notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    // Still stack-locked by the caller so there cannot be any waiters.
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int _forceMonitorScavenge = 0;  // Scavenge required and pending

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;  // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;  // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock.
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflation lock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: naked_yield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();  // SMP-polite spinning
    }
  }
}

// hashCode() generation:
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//     2654435761 = 2^32 * Phi (golden ratio)
//     HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);

    // object should remain ineligible for biased locking
    assert(!mark.has_bias_pattern(), "invariant");

    if (mark.is_neutral()) {            // if this is a normal header
      hash = mark.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
                                        // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {               // if the hash was installed, return it
        return hash;
      }
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else if (mark.has_monitor()) {
      // The first stage of a racing async deflation won't affect the
      // hash value if this ObjectMonitor happens to already have one.
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    } else if (self->is_lock_owned((address)mark.locker())) {
      // This is a stack lock owned by the calling thread so fetch the
      // displaced markWord from the BasicLock on the stack.
      temp = mark.displaced_mark_helper();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // WARNING:
      // The displaced header in the BasicLock on a thread's stack
      // is strictly immutable. It CANNOT be changed in ANY cases.
      // So we have to inflate the stack lock into an ObjectMonitor
      // even if the current thread owns the lock. The BasicLock on
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.

    // An async deflation can race after the inflate() call and before we
    // can update the ObjectMonitor's header with the hash value below.
    monitor = inflate(self, obj, inflate_cause_hash_code);
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                    // if it does not have a hash
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated()) {
        // If we detect that async deflation has occurred, then we
        // simply retry so that the hash value can be stored in either
        // the object's header or in the re-inflated ObjectMonitor's
        // header as appropriate.
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}
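
// Illustrative summary (comment only) of FastHashCode()'s install points:
//
//   neutral mark:    CAS the hash directly into the object's mark word
//   inflated:        read the hash from the ObjectMonitor's header/dmw
//   stack-locked:    read the hash from the displaced mark on the owner's
//                    stack; never write it there (see WARNING above)
//   otherwise/race:  inflate, then CAS the hash into the monitor's header
//                    and retry from the top if async deflation intervened
//
// Every racing thread observes the same final hash because each install
// uses a CAS and losers adopt the winner's value.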

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    return thread->is_lock_owned((address)mark.locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint and AsyncDeflateIdleMonitors is false.
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    void* owner = monitor->owner();
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ?
      owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      if (!mid->is_free() && mid->object() != NULL) {
        // Only process with closure if the object is set.

        // monitors_iterate() is only called at a safepoint or when the
        // target thread is suspended or when the target thread is
        // operating on itself. The closures are only interested in an
        // owned ObjectMonitor and ownership cannot be dropped under the
        // calling contexts so the ObjectMonitor cannot be async deflated.
        closure->do_monitor(mid);
      }
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
}

static bool monitors_used_above_threshold() {
  int population = Atomic::load(&om_list_globals._population);
  if (population == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = population - Atomic::load(&om_list_globals._free_count) -
                        Atomic::load(&om_list_globals._wait_count);
    int monitor_usage = (monitors_used * 100LL) / population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}
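
// Worked example (comment only) for monitors_used_above_threshold(), using
// made-up counts: with _population == 1000, _free_count == 200 and
// _wait_count == 100, monitors_used is 1000 - 200 - 100 == 700, so
// monitor_usage is (700 * 100) / 1000 == 70. Deflation is requested only if
// MonitorUsedDeflationThreshold is below 70. The 100LL widening keeps the
// multiply from overflowing int for very large populations.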

// Returns true if MonitorBound is set (> 0) and if the specified
// cnt is > MonitorBound. Otherwise returns false.
static bool is_MonitorBound_exceeded(const int cnt) {
  const int mx = MonitorBound;
  return mx > 0 && cnt > mx;
}

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
  int monitors_used = Atomic::load(&om_list_globals._population) -
                      Atomic::load(&om_list_globals._free_count) -
                      Atomic::load(&om_list_globals._wait_count);
  if (is_MonitorBound_exceeded(monitors_used)) {
    // Not enough ObjectMonitors on the global free list.
    return true;
  }
  return false;
}

bool ObjectSynchronizer::needs_monitor_scavenge() {
  if (Atomic::load(&_forceMonitorScavenge) == 1) {
    log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
    return true;
  }
  return false;
}

bool ObjectSynchronizer::is_safepoint_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    if (monitors_used_above_threshold()) {
      // Too many monitors in use.
      return true;
    }
    return needs_monitor_scavenge();
  }
  if (is_special_deflation_requested()) {
    // For AsyncDeflateIdleMonitors only do a safepoint deflation
    // if there is a special deflation request.
    return true;
  }
  return false;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to lock ObjectMonitors for the list traversal.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}
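
// Illustrative lifecycle (comment only) of a single ObjectMonitor in terms
// of the allocation states used in this file; the exact transition points
// live in om_alloc(), inflate() and the deflation code:
//
//   Free  (on om_list_globals._free_list or a per-thread om_free_list)
//     -> New  (om_alloc() moves it to the caller's om_in_use_list)
//     -> Old  (once installed as an object's monitor and aged)
//     -> Free (deflation scavenges it back to a free list, possibly via
//              om_list_globals._wait_list when async deflation is used)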


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from om_list_globals._free_list or a per-thread
// free list and associates them with objects. Deflation -- which occurs at
// STW-time or asynchronously -- disassociates idle monitors from objects.
// Such scavenged monitors are returned to the om_list_globals._free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the om_list_globals._free_list
// --   unassigned and on a per-thread free list
// --   assigned to an object. The object is inflated and the mark refers
//      to the ObjectMonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
//
// When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// When safepoint deflation is being used and MonitorBound is set, the
// boundary applies to
//   (om_list_globals._population - om_list_globals._free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.
//
// When async deflation is being used:
// The monitor pool is still grow-only. Async deflation is requested
// by a safepoint's cleanup phase or by the ServiceThread at periodic
// intervals when is_async_deflation_needed() returns true. In
// addition to other policies that are checked, if there are not
// enough ObjectMonitors on the global free list, then
// is_async_deflation_needed() will return true. The ServiceThread
// calls deflate_global_idle_monitors_using_JT() and also calls
// deflate_per_thread_idle_monitors_using_JT() as needed.

static void InduceScavenge(Thread* self, const char * Whence) {
  assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");

  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger a cleanup safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
    VMThread::check_for_forced_cleanup();
  }
}

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
1441 const int MAXPRIVATE = 1024;
1442 NoSafepointVerifier nsv;
1443
1444 for (;;) {
1445 ObjectMonitor* m;
1446
1447 // 1: try to allocate from the thread's local om_free_list.
1448 // Threads will attempt to allocate first from their local list, then
1449 // from the global list, and only after those attempts fail will the
1450 // thread attempt to instantiate new monitors. Thread-local free lists
1451 // improve allocation latency, as well as reducing coherency traffic
1452 // on the shared global list.
1453 m = take_from_start_of_om_free_list(self);
1454 if (m != NULL) {
1455 guarantee(m->object() == NULL, "invariant");
1456 m->set_allocation_state(ObjectMonitor::New);
1457 prepend_to_om_in_use_list(self, m);
1458 return m;
1459 }
1460
1461 // 2: try to allocate from the global om_list_globals._free_list
1462 // If we're using thread-local free lists then try
1463 // to reprovision the caller's free list.
1464 if (Atomic::load(&om_list_globals._free_list) != NULL) {
1465 // Reprovision the thread's om_free_list.
1466 // Use bulk transfers to reduce the allocation rate and heat
1467 // on various locks.
1468 for (int i = self->om_free_provision; --i >= 0;) {
1469 ObjectMonitor* take = take_from_start_of_global_free_list();
1470 if (take == NULL) {
1471 break; // No more are available.
1472 }
1473 guarantee(take->object() == NULL, "invariant");
1474 if (AsyncDeflateIdleMonitors) {
1475 // We allowed 3 field values to linger during async deflation.
1476 // Clear or restore them as appropriate.
1477 take->set_header(markWord::zero());
1478 // DEFLATER_MARKER is the only non-NULL value we should see here.
1479 take->try_set_owner_from(DEFLATER_MARKER, NULL);
1480 if (take->contentions() < 0) {
1481 // Add back max_jint to restore the contentions field to its
1482 // proper value.
1483 Atomic::add(&take->_contentions, max_jint);
1484
1485 #ifdef ASSERT
1486 jint l_contentions = take->contentions();
1487 #endif
1488 assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
1489 l_contentions, take->contentions());
1490 }
1491 }
1492 take->Recycle();
1493 // Since we're taking from the global free-list, take must be Free.
1494 // om_release() also sets the allocation state to Free because it
1495 // is called from other code paths.
1496 assert(take->is_free(), "invariant");
1497 om_release(self, take, false);
1498 }
1499 self->om_free_provision += 1 + (self->om_free_provision / 2);
1500 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1501
1502 if (!AsyncDeflateIdleMonitors &&
1503 is_MonitorBound_exceeded(Atomic::load(&om_list_globals._population) -
1504 Atomic::load(&om_list_globals._free_count))) {
1505 // Not enough ObjectMonitors on the global free list.
1506 // We can't safely induce a STW safepoint from om_alloc() as our thread
1507 // state may not be appropriate for such activities and callers may hold
1508 // naked oops, so instead we defer the action.
1509 InduceScavenge(self, "om_alloc");
1510 }
1511 continue;
1512 }
1513
1514 // 3: allocate a block of new ObjectMonitors
1515 // Both the local and global free lists are empty -- resort to malloc().
1516 // In the current implementation ObjectMonitors are TSM - immortal.
1517 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1518 // each ObjectMonitor to start at the beginning of a cache line,
1519 // so we use align_up().
1520 // A better solution would be to use C++ placement-new.
1521 // BEWARE: As it stands currently, we don't run the ctors!
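// A hedged sketch of what a placement-new variant could look like,
// assuming a usable ObjectMonitor default constructor (this is NOT
// what the code below does -- it relies on memset() instead):
//   for (int i = 0; i < _BLOCKSIZE; i++) {
//     new (&temp[i]) ObjectMonitor();  // run the ctor on the aligned storage
//   }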
1522 assert(_BLOCKSIZE > 1, "invariant");
1523 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1524 PaddedObjectMonitor* temp;
1525 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1526 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1527 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1528 (void)memset((void *) temp, 0, neededsize);
1529
1530 // Format the block.
1531 // Initialize the linked list: each monitor points to its next,
1532 // forming the singly linked free list; the very first monitor
1533 // points to the next block, which forms the block list.
1534 // The trick of using the 1st element in the block as g_block_list
1535 // linkage should be reconsidered. A better implementation would
1536 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1537
1538 for (int i = 1; i < _BLOCKSIZE; i++) {
1539 temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
1540 assert(temp[i].is_free(), "invariant");
1541 }
1542
1543 // terminate the last monitor as the end of list
1544 temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1545
1546 // Element [0] is reserved for global list linkage
1547 temp[0].set_object(CHAINMARKER);
1548
1549 // Consider carving out this thread's current request from the
1550 // block in hand. This avoids some lock traffic and redundant
1551 // list activity.
1552
1553 prepend_block_to_lists(temp);
1554 }
1555 }
1556
1557 // Place "m" on the caller's private per-thread om_free_list.
1558 // In practice there's no need to clamp or limit the number of
1559 // monitors on a thread's om_free_list as the only non-allocation time
1560 // we'll call om_release() is to return a monitor to the free list after
1561 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1562 // accumulate on a thread's free list.
1563 //
1564 // Key constraint: all ObjectMonitors on a thread's free list and the global
1565 // free list must have their object field set to null. This prevents the
1566 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1567 // -- from reclaiming them while we are trying to release them.
1568
1569 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1570 bool from_per_thread_alloc) {
1571 guarantee(m->header().value() == 0, "invariant");
1572 guarantee(m->object() == NULL, "invariant");
1573 NoSafepointVerifier nsv;
1574
1575 if ((m->is_busy() | m->_recursions) != 0) {
1576 stringStream ss;
1577 fatal("freeing in-use monitor: %s, recursions=" INTX_FORMAT,
1578 m->is_busy_to_string(&ss), m->_recursions);
1579 }
1580 m->set_allocation_state(ObjectMonitor::Free);
1581 // _next_om is used for both per-thread in-use and free lists so
1582 // we have to remove 'm' from the in-use list first (as needed).
1583 if (from_per_thread_alloc) {
1584 // Need to remove 'm' from om_in_use_list.
1585 ObjectMonitor* mid = NULL;
1586 ObjectMonitor* next = NULL;
1587
1588 // This list walk can race with another list walker or with async
1589 // deflation so we have to worry about an ObjectMonitor being
1590 // removed from this list while we are walking it.
1591
1592 // Lock the list head to avoid racing with another list walker
1593 // or with async deflation.
1594 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1595 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1596 }
1597 next = unmarked_next(mid);
1598 if (m == mid) {
1599 // First special case:
1600 // 'm' matches mid, is the list head and is locked. Switch the list
1601 // head to next which unlocks the list head, but leaves the extracted
1602 // mid locked:
1603 Atomic::store(&self->om_in_use_list, next);
1604 } else if (m == next) {
1605 // Second special case:
1606 // 'm' matches next after the list head and we already have the list
1607 // head locked so set mid to what we are extracting:
1608 mid = next;
1609 // Lock mid to prevent races with a list walker or an async
1610 // deflater thread that's ahead of us. The locked list head
1611 // prevents races from behind us.
1612 om_lock(mid);
1613 // Update next to what follows mid (if anything):
1614 next = unmarked_next(mid);
1615 // Switch next after the list head to new next which unlocks the
1616 // list head, but leaves the extracted mid locked:
1617 self->om_in_use_list->set_next_om(next);
1618 } else {
1619 // We have to search the list to find 'm'.
1620 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1621 " is too short.", p2i(self), p2i(self->om_in_use_list));
1622 // Our starting anchor is next after the list head which is the
1623 // last ObjectMonitor we checked:
1624 ObjectMonitor* anchor = next;
1625 // Lock anchor to prevent races with a list walker or an async
1626 // deflater thread that's ahead of us. The locked list head
1627 // prevents races from behind us.
1628 om_lock(anchor);
1629 om_unlock(mid); // Unlock the list head now that anchor is locked.
1630 while ((mid = unmarked_next(anchor)) != NULL) {
1631 if (m == mid) {
1632 // We found 'm' on the per-thread in-use list so extract it.
1633 // Update next to what follows mid (if anything):
1634 next = unmarked_next(mid);
1635 // Switch next after the anchor to new next which unlocks the
1636 // anchor, but leaves the extracted mid locked:
1637 anchor->set_next_om(next);
1638 break;
1639 } else {
1640 // Lock the next anchor to prevent races with a list walker
1641 // or an async deflater thread that's ahead of us. The locked
1642 // current anchor prevents races from behind us.
1643 om_lock(mid);
1644 // Unlock current anchor now that next anchor is locked:
1645 om_unlock(anchor);
1646 anchor = mid; // Advance to new anchor and try again.
1647 }
1648 }
1649 }
1650
1651 if (mid == NULL) {
1652 // Reached end of the list and didn't find 'm' so:
1653 fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1654 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1655 }
1656
1657 // At this point mid is disconnected from the in-use list so
1658 // its lock no longer has any effects on the in-use list.
1659 Atomic::dec(&self->om_in_use_count);
1660 // Unlock mid, but leave the next value for any lagging list
1661 // walkers. It will get cleaned up when mid is prepended to
1662 // the thread's free list:
1663 om_unlock(mid);
1664 }
1665
1666 prepend_to_om_free_list(self, m);
1667 guarantee(m->is_free(), "invariant");
1668 }
1669
1670 // Return ObjectMonitors on a moribund thread's free and in-use
1671 // lists to the appropriate global lists. The ObjectMonitors on the
1672 // per-thread in-use list may still be in use by other threads.
1673 // 1674 // We currently call om_flush() from Threads::remove() before the 1675 // thread has been excised from the thread list and is no longer a 1676 // mutator. This means that om_flush() cannot run concurrently with 1677 // a safepoint and interleave with deflate_idle_monitors(). In 1678 // particular, this ensures that the thread's in-use monitors are 1679 // scanned by a GC safepoint, either via Thread::oops_do() (before 1680 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after 1681 // om_flush() is called). 1682 // 1683 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT() 1684 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can 1685 // run at the same time as om_flush() so we have to follow a careful 1686 // protocol to prevent list corruption. 1687 1688 void ObjectSynchronizer::om_flush(Thread* self) { 1689 // Process the per-thread in-use list first to be consistent. 1690 int in_use_count = 0; 1691 ObjectMonitor* in_use_list = NULL; 1692 ObjectMonitor* in_use_tail = NULL; 1693 NoSafepointVerifier nsv; 1694 1695 // This function can race with a list walker or with an async 1696 // deflater thread so we lock the list head to prevent confusion. 1697 // An async deflater thread checks to see if the target thread 1698 // is exiting, but if it has made it past that check before we 1699 // started exiting, then it is racing to get to the in-use list. 1700 if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) { 1701 // At this point, we have locked the in-use list head so a racing 1702 // thread cannot come in after us. However, a racing thread could 1703 // be ahead of us; we'll detect that and delay to let it finish. 1704 // 1705 // The thread is going away, however the ObjectMonitors on the 1706 // om_in_use_list may still be in-use by other threads. Link 1707 // them to in_use_tail, which will be linked into the global 1708 // in-use list (om_list_globals._in_use_list) below. 1709 // 1710 // Account for the in-use list head before the loop since it is 1711 // already locked (by this thread): 1712 in_use_tail = in_use_list; 1713 in_use_count++; 1714 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) { 1715 if (is_locked(cur_om)) { 1716 // cur_om is locked so there must be a racing walker or async 1717 // deflater thread ahead of us so we'll give it a chance to finish. 1718 while (is_locked(cur_om)) { 1719 os::naked_short_sleep(1); 1720 } 1721 // Refetch the possibly changed next field and try again. 1722 cur_om = unmarked_next(in_use_tail); 1723 continue; 1724 } 1725 if (cur_om->is_free()) { 1726 // cur_om was deflated and the allocation state was changed 1727 // to Free while it was locked. We happened to see it just 1728 // after it was unlocked (and added to the free list). 1729 // Refetch the possibly changed next field and try again. 
1730 cur_om = unmarked_next(in_use_tail); 1731 continue; 1732 } 1733 in_use_tail = cur_om; 1734 in_use_count++; 1735 cur_om = unmarked_next(cur_om); 1736 } 1737 guarantee(in_use_tail != NULL, "invariant"); 1738 int l_om_in_use_count = Atomic::load(&self->om_in_use_count); 1739 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: " 1740 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count); 1741 Atomic::store(&self->om_in_use_count, 0); 1742 // Clear the in-use list head (which also unlocks it): 1743 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL); 1744 om_unlock(in_use_list); 1745 } 1746 1747 int free_count = 0; 1748 ObjectMonitor* free_list = NULL; 1749 ObjectMonitor* free_tail = NULL; 1750 // This function can race with a list walker thread so we lock the 1751 // list head to prevent confusion. 1752 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) { 1753 // At this point, we have locked the free list head so a racing 1754 // thread cannot come in after us. However, a racing thread could 1755 // be ahead of us; we'll detect that and delay to let it finish. 1756 // 1757 // The thread is going away. Set 'free_tail' to the last per-thread free 1758 // monitor which will be linked to om_list_globals._free_list below. 1759 // 1760 // Account for the free list head before the loop since it is 1761 // already locked (by this thread): 1762 free_tail = free_list; 1763 free_count++; 1764 for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) { 1765 if (is_locked(s)) { 1766 // s is locked so there must be a racing walker thread ahead 1767 // of us so we'll give it a chance to finish. 1768 while (is_locked(s)) { 1769 os::naked_short_sleep(1); 1770 } 1771 } 1772 free_tail = s; 1773 free_count++; 1774 guarantee(s->object() == NULL, "invariant"); 1775 if (s->is_busy()) { 1776 stringStream ss; 1777 fatal("must be !is_busy: %s", s->is_busy_to_string(&ss)); 1778 } 1779 } 1780 guarantee(free_tail != NULL, "invariant"); 1781 int l_om_free_count = Atomic::load(&self->om_free_count); 1782 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: " 1783 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count); 1784 Atomic::store(&self->om_free_count, 0); 1785 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL); 1786 om_unlock(free_list); 1787 } 1788 1789 if (free_tail != NULL) { 1790 prepend_list_to_global_free_list(free_list, free_tail, free_count); 1791 } 1792 1793 if (in_use_tail != NULL) { 1794 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count); 1795 } 1796 1797 LogStreamHandle(Debug, monitorinflation) lsh_debug; 1798 LogStreamHandle(Info, monitorinflation) lsh_info; 1799 LogStream* ls = NULL; 1800 if (log_is_enabled(Debug, monitorinflation)) { 1801 ls = &lsh_debug; 1802 } else if ((free_count != 0 || in_use_count != 0) && 1803 log_is_enabled(Info, monitorinflation)) { 1804 ls = &lsh_info; 1805 } 1806 if (ls != NULL) { 1807 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d" 1808 ", in_use_count=%d" ", om_free_provision=%d", 1809 p2i(self), free_count, in_use_count, self->om_free_provision); 1810 } 1811 } 1812 1813 static void post_monitor_inflate_event(EventJavaMonitorInflate* event, 1814 const oop obj, 1815 ObjectSynchronizer::InflateCause cause) { 1816 assert(event != NULL, "invariant"); 1817 assert(event->should_commit(), "invariant"); 1818 event->set_monitorClass(obj->klass()); 1819 event->set_address((uintptr_t)(void*)obj); 1820 
event->set_cause((u1)cause);
1821 event->commit();
1822 }
1823
1824 // Fast path code shared by multiple functions
1825 void ObjectSynchronizer::inflate_helper(oop obj) {
1826 markWord mark = obj->mark();
1827 if (mark.has_monitor()) {
1828 ObjectMonitor* monitor = mark.monitor();
1829 assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor=" INTPTR_FORMAT " is invalid", p2i(monitor));
1830 markWord dmw = monitor->header();
1831 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1832 return;
1833 }
1834 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
1835 }
1836
1837 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
1838 const InflateCause cause) {
1839 // Inflate mutates the heap ...
1840 // Relaxing assertion for bug 6320749.
1841 assert(Universe::verify_in_progress() ||
1842 !SafepointSynchronize::is_at_safepoint(), "invariant");
1843
1844 EventJavaMonitorInflate event;
1845
1846 for (;;) {
1847 const markWord mark = object->mark();
1848 assert(!mark.has_bias_pattern(), "invariant");
1849
1850 // The mark can be in one of the following states:
1851 // * Inflated - just return
1852 // * Stack-locked - coerce it to inflated
1853 // * INFLATING - busy wait for conversion to complete
1854 // * Neutral - aggressively inflate the object.
1855 // * BIASED - Illegal. We should never see this.
1856
1857 // CASE: inflated
1858 if (mark.has_monitor()) {
1859 ObjectMonitor* inf = mark.monitor();
1860 markWord dmw = inf->header();
1861 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1862 assert(AsyncDeflateIdleMonitors || inf->object() == object, "invariant");
1863 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1864 return inf;
1865 }
1866
1867 // CASE: inflation in progress - inflating over a stack-lock.
1868 // Some other thread is converting from stack-locked to inflated.
1869 // Only that thread can complete inflation -- other threads must wait.
1870 // The INFLATING value is transient.
1871 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1872 // We could always eliminate polling by parking the thread on some auxiliary list.
1873 if (mark == markWord::INFLATING()) {
1874 read_stable_mark(object);
1875 continue;
1876 }
1877
1878 // CASE: stack-locked
1879 // Could be stack-locked either by this thread or by some other thread.
1880 //
1881 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1882 // to install INFLATING into the mark word. We originally installed INFLATING,
1883 // allocated the objectmonitor, and then finally STed the address of the
1884 // objectmonitor into the mark. This was correct, but artificially lengthened
1885 // the interval in which INFLATED appeared in the mark, thus increasing
1886 // the odds of inflation contention.
1887 //
1888 // We now use per-thread private objectmonitor free lists.
1889 // These lists are reprovisioned from the global free list outside the
1890 // critical INFLATING...ST interval. A thread can transfer
1891 // multiple objectmonitors en masse from the global free list to its local free list.
1892 // This reduces coherency traffic and lock contention on the global free list.
1893 // Using such local free lists, it doesn't matter if the om_alloc() call appears
1894 // before or after the CAS(INFLATING) operation.
1895 // See the comments in om_alloc().
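// Compact roadmap for the stack-locked case handled below (a sketch
// that mirrors the code which follows, not an alternative to it):
//   m = om_alloc(self);                               // speculative allocation
//   if (object->cas_set_mark(INFLATING, mark) != mark) retry;
//   m->set_header(mark.displaced_mark_helper());      // copy the dmw
//   object->release_set_mark(markWord::encode(m));    // publish the monitor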
1896
1897 LogStreamHandle(Trace, monitorinflation) lsh;
1898
1899 if (mark.has_locker()) {
1900 ObjectMonitor* m = om_alloc(self);
1901 // Optimistically prepare the objectmonitor - anticipate successful CAS
1902 // We do this before the CAS in order to minimize the length of time
1903 // in which INFLATING appears in the mark.
1904 m->Recycle();
1905 m->_Responsible = NULL;
1906 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1907
1908 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1909 if (cmp != mark) {
1910 // om_release() will reset the allocation state from New to Free.
1911 om_release(self, m, true);
1912 continue; // Interference -- just retry
1913 }
1914
1915 // We've successfully installed INFLATING (0) into the mark-word.
1916 // This is the only case where 0 will appear in a mark-word.
1917 // Only the singular thread that successfully swings the mark-word
1918 // to 0 can perform (or more precisely, complete) inflation.
1919 //
1920 // Why do we CAS a 0 into the mark-word instead of just CASing the
1921 // mark-word from the stack-locked value directly to the new inflated state?
1922 // Consider what happens when a thread unlocks a stack-locked object.
1923 // It attempts to use CAS to swing the displaced header value from the
1924 // on-stack BasicLock back into the object header. Recall also that the
1925 // header value (hash code, etc) can reside in (a) the object header, or
1926 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1927 // header in an ObjectMonitor. The inflate() routine must copy the header
1928 // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1929 // the while preserving the hashCode stability invariants. If the owner
1930 // decides to release the lock while the value is 0, the unlock will fail
1931 // and control will eventually pass from slow_exit() to inflate. The owner
1932 // will then spin, waiting for the 0 value to disappear. Put another way,
1933 // the 0 causes the owner to stall if the owner happens to try to
1934 // drop the lock (restoring the header from the BasicLock to the object)
1935 // while inflation is in-progress. This protocol avoids races that
1936 // would otherwise permit hashCode values to change or "flicker" for an object.
1937 // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
1938 // 0 serves as a "BUSY" inflate-in-progress indicator.
1939
1940
1941 // Fetch the displaced mark from the owner's stack.
1942 // The owner can't die or unwind past the lock while our INFLATING
1943 // object is in the mark. Furthermore the owner can't complete
1944 // an unlock on the object, either.
1945 markWord dmw = mark.displaced_mark_helper();
1946 // Catch if the object's header is not neutral (not locked and
1947 // not marked is what we care about here).
1948 ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1949
1950 // Set up monitor fields to proper values -- prepare the monitor
1951 m->set_header(dmw);
1952
1953 // Optimization: if the mark.locker stack address is associated
1954 // with this thread we could simply set m->_owner = self.
1955 // Note that a thread can inflate an object
1956 // that it has stack-locked -- as might happen in wait() -- directly
1957 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
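// An illustrative (unimplemented) form of that shortcut, assuming
// Thread::is_lock_owned() as used elsewhere in the VM:
//   if (self->is_lock_owned((address)mark.locker())) {
//     m->set_owner_from(NULL, self);  // we hold the stack-lock already
//   }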
1958 if (AsyncDeflateIdleMonitors) { 1959 m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker()); 1960 } else { 1961 m->set_owner_from(NULL, mark.locker()); 1962 } 1963 m->set_object(object); 1964 // TODO-FIXME: assert BasicLock->dhw != 0. 1965 1966 // Must preserve store ordering. The monitor state must 1967 // be stable at the time of publishing the monitor address. 1968 guarantee(object->mark() == markWord::INFLATING(), "invariant"); 1969 object->release_set_mark(markWord::encode(m)); 1970 1971 // Once ObjectMonitor is configured and the object is associated 1972 // with the ObjectMonitor, it is safe to allow async deflation: 1973 assert(m->is_new(), "freshly allocated monitor must be new"); 1974 m->set_allocation_state(ObjectMonitor::Old); 1975 1976 // Hopefully the performance counters are allocated on distinct cache lines 1977 // to avoid false sharing on MP systems ... 1978 OM_PERFDATA_OP(Inflations, inc()); 1979 if (log_is_enabled(Trace, monitorinflation)) { 1980 ResourceMark rm(self); 1981 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark=" 1982 INTPTR_FORMAT ", type='%s'", p2i(object), 1983 object->mark().value(), object->klass()->external_name()); 1984 } 1985 if (event.should_commit()) { 1986 post_monitor_inflate_event(&event, object, cause); 1987 } 1988 return m; 1989 } 1990 1991 // CASE: neutral 1992 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. 1993 // If we know we're inflating for entry it's better to inflate by swinging a 1994 // pre-locked ObjectMonitor pointer into the object header. A successful 1995 // CAS inflates the object *and* confers ownership to the inflating thread. 1996 // In the current implementation we use a 2-step mechanism where we CAS() 1997 // to inflate and then CAS() again to try to swing _owner from NULL to self. 1998 // An inflateTry() method that we could call from enter() would be useful. 1999 2000 // Catch if the object's header is not neutral (not locked and 2001 // not marked is what we care about here). 2002 ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); 2003 ObjectMonitor* m = om_alloc(self); 2004 // prepare m for installation - set monitor to initial state 2005 m->Recycle(); 2006 m->set_header(mark); 2007 if (AsyncDeflateIdleMonitors) { 2008 // DEFLATER_MARKER is the only non-NULL value we should see here. 2009 m->try_set_owner_from(DEFLATER_MARKER, NULL); 2010 } 2011 m->set_object(object); 2012 m->_Responsible = NULL; 2013 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class 2014 2015 if (object->cas_set_mark(markWord::encode(m), mark) != mark) { 2016 m->set_header(markWord::zero()); 2017 m->set_object(NULL); 2018 m->Recycle(); 2019 // om_release() will reset the allocation state from New to Free. 2020 om_release(self, m, true); 2021 m = NULL; 2022 continue; 2023 // interference - the markword changed - just retry. 2024 // The state-transitions are one-way, so there's no chance of 2025 // live-lock -- "Inflated" is an absorbing state. 2026 } 2027 2028 // Once the ObjectMonitor is configured and object is associated 2029 // with the ObjectMonitor, it is safe to allow async deflation: 2030 assert(m->is_new(), "freshly allocated monitor must be new"); 2031 m->set_allocation_state(ObjectMonitor::Old); 2032 2033 // Hopefully the performance counters are allocated on distinct 2034 // cache lines to avoid false sharing on MP systems ... 
2035 OM_PERFDATA_OP(Inflations, inc()); 2036 if (log_is_enabled(Trace, monitorinflation)) { 2037 ResourceMark rm(self); 2038 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark=" 2039 INTPTR_FORMAT ", type='%s'", p2i(object), 2040 object->mark().value(), object->klass()->external_name()); 2041 } 2042 if (event.should_commit()) { 2043 post_monitor_inflate_event(&event, object, cause); 2044 } 2045 return m; 2046 } 2047 } 2048 2049 2050 // We maintain a list of in-use monitors for each thread. 2051 // 2052 // For safepoint based deflation: 2053 // deflate_thread_local_monitors() scans a single thread's in-use list, while 2054 // deflate_idle_monitors() scans only a global list of in-use monitors which 2055 // is populated only as a thread dies (see om_flush()). 2056 // 2057 // These operations are called at all safepoints, immediately after mutators 2058 // are stopped, but before any objects have moved. Collectively they traverse 2059 // the population of in-use monitors, deflating where possible. The scavenged 2060 // monitors are returned to the global monitor free list. 2061 // 2062 // Beware that we scavenge at *every* stop-the-world point. Having a large 2063 // number of monitors in-use could negatively impact performance. We also want 2064 // to minimize the total # of monitors in circulation, as they incur a small 2065 // footprint penalty. 2066 // 2067 // Perversely, the heap size -- and thus the STW safepoint rate -- 2068 // typically drives the scavenge rate. Large heaps can mean infrequent GC, 2069 // which in turn can mean large(r) numbers of ObjectMonitors in circulation. 2070 // This is an unfortunate aspect of this design. 2071 // 2072 // For async deflation: 2073 // If a special deflation request is made, then the safepoint based 2074 // deflation mechanism is used. Otherwise, an async deflation request 2075 // is registered with the ServiceThread and it is notified. 2076 2077 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) { 2078 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2079 2080 // The per-thread in-use lists are handled in 2081 // ParallelSPCleanupThreadClosure::do_thread(). 2082 2083 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) { 2084 // Use the older mechanism for the global in-use list or if a 2085 // special deflation has been requested before the safepoint. 2086 ObjectSynchronizer::deflate_idle_monitors(counters); 2087 return; 2088 } 2089 2090 log_debug(monitorinflation)("requesting async deflation of idle monitors."); 2091 // Request deflation of idle monitors by the ServiceThread: 2092 set_is_async_deflation_requested(true); 2093 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); 2094 ml.notify_all(); 2095 2096 if (log_is_enabled(Debug, monitorinflation)) { 2097 // exit_globals()'s call to audit_and_print_stats() is done 2098 // at the Info level and not at a safepoint. 2099 // For safepoint based deflation, audit_and_print_stats() is called 2100 // in ObjectSynchronizer::finish_deflate_idle_monitors() at the 2101 // Debug level at a safepoint. 2102 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); 2103 } 2104 } 2105 2106 // Deflate a single monitor if not in-use 2107 // Return true if deflated, false if in-use 2108 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, 2109 ObjectMonitor** free_head_p, 2110 ObjectMonitor** free_tail_p) { 2111 bool deflated; 2112 // Normal case ... The monitor is associated with obj. 
2113 const markWord mark = obj->mark(); 2114 guarantee(mark == markWord::encode(mid), "should match: mark=" 2115 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(), 2116 markWord::encode(mid).value()); 2117 // Make sure that mark.monitor() and markWord::encode() agree: 2118 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT 2119 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid)); 2120 const markWord dmw = mid->header(); 2121 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); 2122 2123 if (mid->is_busy()) { 2124 // Easy checks are first - the ObjectMonitor is busy so no deflation. 2125 deflated = false; 2126 } else { 2127 // Deflate the monitor if it is no longer being used 2128 // It's idle - scavenge and return to the global free list 2129 // plain old deflation ... 2130 if (log_is_enabled(Trace, monitorinflation)) { 2131 ResourceMark rm; 2132 log_trace(monitorinflation)("deflate_monitor: " 2133 "object=" INTPTR_FORMAT ", mark=" 2134 INTPTR_FORMAT ", type='%s'", p2i(obj), 2135 mark.value(), obj->klass()->external_name()); 2136 } 2137 2138 // Restore the header back to obj 2139 obj->release_set_mark(dmw); 2140 if (AsyncDeflateIdleMonitors) { 2141 // clear() expects the owner field to be NULL. 2142 // DEFLATER_MARKER is the only non-NULL value we should see here. 2143 mid->try_set_owner_from(DEFLATER_MARKER, NULL); 2144 } 2145 mid->clear(); 2146 2147 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT, 2148 p2i(mid->object())); 2149 assert(mid->is_free(), "invariant"); 2150 2151 // Move the deflated ObjectMonitor to the working free list 2152 // defined by free_head_p and free_tail_p. 2153 if (*free_head_p == NULL) *free_head_p = mid; 2154 if (*free_tail_p != NULL) { 2155 // We append to the list so the caller can use mid->_next_om 2156 // to fix the linkages in its context. 2157 ObjectMonitor* prevtail = *free_tail_p; 2158 // Should have been cleaned up by the caller: 2159 // Note: Should not have to lock prevtail here since we're at a 2160 // safepoint and ObjectMonitors on the local free list should 2161 // not be accessed in parallel. 2162 #ifdef ASSERT 2163 ObjectMonitor* l_next_om = prevtail->next_om(); 2164 #endif 2165 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2166 prevtail->set_next_om(mid); 2167 } 2168 *free_tail_p = mid; 2169 // At this point, mid->_next_om still refers to its current 2170 // value and another ObjectMonitor's _next_om field still 2171 // refers to this ObjectMonitor. Those linkages have to be 2172 // cleaned up by the caller who has the complete context. 2173 deflated = true; 2174 } 2175 return deflated; 2176 } 2177 2178 // Deflate the specified ObjectMonitor if not in-use using a JavaThread. 2179 // Returns true if it was deflated and false otherwise. 2180 // 2181 // The async deflation protocol sets owner to DEFLATER_MARKER and 2182 // makes contentions negative as signals to contending threads that 2183 // an async deflation is in progress. There are a number of checks 2184 // as part of the protocol to make sure that the calling thread has 2185 // not lost the race to a contending thread. 2186 // 2187 // The ObjectMonitor has been successfully async deflated when: 2188 // (owner == DEFLATER_MARKER && contentions < 0) 2189 // Contending threads that see those values know to retry their operation. 
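// A compact (illustrative) restatement of the dance implemented below:
//   1) try_set_owner_from(NULL, DEFLATER_MARKER)   // part one: claim a NULL owner
//   2) cmpxchg(&_contentions, 0, -max_jint)        // part two: bias contentions negative
//   3) recheck owner_is_DEFLATER_MARKER()          // part three: catch interference
// with owner and/or contentions rolled back at each failure point.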
2190 //
2191 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
2192 ObjectMonitor** free_head_p,
2193 ObjectMonitor** free_tail_p) {
2194 assert(AsyncDeflateIdleMonitors, "sanity check");
2195 assert(Thread::current()->is_Java_thread(), "precondition");
2196 // A newly allocated ObjectMonitor should not be seen here so we
2197 // avoid an endless inflate/deflate cycle.
2198 assert(mid->is_old(), "must be old: allocation_state=%d",
2199 (int) mid->allocation_state());
2200
2201 if (mid->is_busy()) {
2202 // Easy checks are first - the ObjectMonitor is busy so no deflation.
2203 return false;
2204 }
2205
2206 // Set a NULL owner to DEFLATER_MARKER to force any contending thread
2207 // through the slow path. This is just the first part of the async
2208 // deflation dance.
2209 if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) != NULL) {
2210 // The owner field is no longer NULL so we lost the race since the
2211 // ObjectMonitor is now busy.
2212 return false;
2213 }
2214
2215 if (mid->contentions() > 0 || mid->_waiters != 0) {
2216 // Another thread has raced to enter the ObjectMonitor after
2217 // mid->is_busy() above or has already entered and waited on
2218 // it which makes it busy so no deflation. Restore owner to
2219 // NULL if it is still DEFLATER_MARKER.
2220 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2221 return false;
2222 }
2223
2224 // Make contentions negative to force any contending threads to
2225 // retry. This is the second part of the async deflation dance.
2226 if (Atomic::cmpxchg(&mid->_contentions, (jint)0, -max_jint) != 0) {
2227 // The contentions field was no longer 0 so we lost the race since the
2228 // ObjectMonitor is now busy. Restore owner to NULL if it is
2229 // still DEFLATER_MARKER:
2230 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2231 return false;
2232 }
2233
2234 // If owner is still DEFLATER_MARKER, then we have successfully
2235 // signaled any contending threads to retry.
2236 if (!mid->owner_is_DEFLATER_MARKER()) {
2237 // If it is not, then we have lost the race to an entering thread
2238 // and the ObjectMonitor is now busy. This is the third and final
2239 // part of the async deflation dance.
2240 // Note: This owner check solves the ABA problem with contentions
2241 // where another thread acquired the ObjectMonitor, finished
2242 // using it and restored contentions to zero.
2243
2244 // Add back max_jint to restore the contentions field to its
2245 // proper value (which may not be what we saw above):
2246 Atomic::add(&mid->_contentions, max_jint);
2247
2248 #ifdef ASSERT
2249 jint l_contentions = mid->contentions();
2250 #endif
2251 assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
2252 l_contentions, mid->contentions());
2253 return false;
2254 }
2255
2256 // Sanity checks for the races:
2257 guarantee(mid->contentions() < 0, "must be negative: contentions=%d",
2258 mid->contentions());
2259 guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
2260 guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
2261 INTPTR_FORMAT, p2i(mid->_cxq));
2262 guarantee(mid->_EntryList == NULL,
2263 "must be no entering threads: EntryList=" INTPTR_FORMAT,
2264 p2i(mid->_EntryList));
2265
2266 const oop obj = (oop) mid->object();
2267 if (log_is_enabled(Trace, monitorinflation)) {
2268 ResourceMark rm;
2269 log_trace(monitorinflation)("deflate_monitor_using_JT: "
2270 "object=" INTPTR_FORMAT ", mark="
2271 INTPTR_FORMAT ", type='%s'",
2272 p2i(obj), obj->mark().value(),
2273 obj->klass()->external_name());
2274 }
2275
2276 // Install the old mark word if nobody else has already done it.
2277 mid->install_displaced_markword_in_object(obj);
2278 mid->clear_using_JT();
2279
2280 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2281 p2i(mid->object()));
2282 assert(mid->is_free(), "must be free: allocation_state=%d",
2283 (int)mid->allocation_state());
2284
2285 // Move the deflated ObjectMonitor to the working free list
2286 // defined by free_head_p and free_tail_p.
2287 if (*free_head_p == NULL) {
2288 // First one on the list.
2289 *free_head_p = mid;
2290 }
2291 if (*free_tail_p != NULL) {
2292 // We append to the list so the caller can use mid->_next_om
2293 // to fix the linkages in its context.
2294 ObjectMonitor* prevtail = *free_tail_p;
2295 // prevtail should have been cleaned up by the caller:
2296 #ifdef ASSERT
2297 ObjectMonitor* l_next_om = unmarked_next(prevtail);
2298 #endif
2299 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2300 om_lock(prevtail);
2301 prevtail->set_next_om(mid); // prevtail now points to mid (and is unlocked)
2302 }
2303 *free_tail_p = mid;
2304
2305 // At this point, mid->_next_om still refers to its current
2306 // value and another ObjectMonitor's _next_om field still
2307 // refers to this ObjectMonitor. Those linkages have to be
2308 // cleaned up by the caller who has the complete context.
2309
2310 // We leave owner == DEFLATER_MARKER and contentions < 0
2311 // to force any racing threads to retry.
2312 return true; // Success, ObjectMonitor has been deflated.
2313 }
2314
2315 // Walk a given monitor list, and deflate idle monitors.
2316 // The given list could be a per-thread list or a global list.
2317 //
2318 // In the case of parallel processing of thread local monitor lists,
2319 // work is done by Threads::parallel_threads_do() which ensures that
2320 // each Java thread is processed by exactly one worker thread, and
2321 // thus avoids conflicts that would arise if worker threads
2322 // processed the same monitor lists concurrently.
2323 //
2324 // See also ParallelSPCleanupTask and
2325 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2326 // Threads::parallel_java_threads_do() in thread.cpp.
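// For orientation, the unlink step performed below reduces to this
// sketch (mirroring the code that follows):
//   if (cur_mid_in_use == NULL) Atomic::store(list_p, next);  // unlink head
//   else cur_mid_in_use->set_next_om(next);                   // unlink middle
// No list locking is needed here since this runs at a safepoint
// with mutators stopped.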
2327 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p, 2328 int* count_p, 2329 ObjectMonitor** free_head_p, 2330 ObjectMonitor** free_tail_p) { 2331 ObjectMonitor* cur_mid_in_use = NULL; 2332 ObjectMonitor* mid = NULL; 2333 ObjectMonitor* next = NULL; 2334 int deflated_count = 0; 2335 2336 // This list walk executes at a safepoint and does not race with any 2337 // other list walkers. 2338 2339 for (mid = Atomic::load(list_p); mid != NULL; mid = next) { 2340 next = unmarked_next(mid); 2341 oop obj = (oop) mid->object(); 2342 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) { 2343 // Deflation succeeded and already updated free_head_p and 2344 // free_tail_p as needed. Finish the move to the local free list 2345 // by unlinking mid from the global or per-thread in-use list. 2346 if (cur_mid_in_use == NULL) { 2347 // mid is the list head so switch the list head to next: 2348 Atomic::store(list_p, next); 2349 } else { 2350 // Switch cur_mid_in_use's next field to next: 2351 cur_mid_in_use->set_next_om(next); 2352 } 2353 // At this point mid is disconnected from the in-use list. 2354 deflated_count++; 2355 Atomic::dec(count_p); 2356 // mid is current tail in the free_head_p list so NULL terminate it: 2357 mid->set_next_om(NULL); 2358 } else { 2359 cur_mid_in_use = mid; 2360 } 2361 } 2362 return deflated_count; 2363 } 2364 2365 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using 2366 // a JavaThread. Returns the number of deflated ObjectMonitors. The given 2367 // list could be a per-thread in-use list or the global in-use list. 2368 // If a safepoint has started, then we save state via saved_mid_in_use_p 2369 // and return to the caller to honor the safepoint. 2370 // 2371 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p, 2372 int* count_p, 2373 ObjectMonitor** free_head_p, 2374 ObjectMonitor** free_tail_p, 2375 ObjectMonitor** saved_mid_in_use_p) { 2376 assert(AsyncDeflateIdleMonitors, "sanity check"); 2377 JavaThread* self = JavaThread::current(); 2378 2379 ObjectMonitor* cur_mid_in_use = NULL; 2380 ObjectMonitor* mid = NULL; 2381 ObjectMonitor* next = NULL; 2382 ObjectMonitor* next_next = NULL; 2383 int deflated_count = 0; 2384 NoSafepointVerifier nsv; 2385 2386 // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go 2387 // protocol because om_release() can do list deletions in parallel; 2388 // this also prevents races with a list walker thread. We also 2389 // lock-next-next-as-we-go to prevent an om_flush() that is behind 2390 // this thread from passing us. 2391 if (*saved_mid_in_use_p == NULL) { 2392 // No saved state so start at the beginning. 2393 // Lock the list head so we can possibly deflate it: 2394 if ((mid = get_list_head_locked(list_p)) == NULL) { 2395 return 0; // The list is empty so nothing to deflate. 2396 } 2397 next = unmarked_next(mid); 2398 } else { 2399 // We're restarting after a safepoint so restore the necessary state 2400 // before we resume. 2401 cur_mid_in_use = *saved_mid_in_use_p; 2402 // Lock cur_mid_in_use so we can possibly update its 2403 // next field to extract a deflated ObjectMonitor. 2404 om_lock(cur_mid_in_use); 2405 mid = unmarked_next(cur_mid_in_use); 2406 if (mid == NULL) { 2407 om_unlock(cur_mid_in_use); 2408 *saved_mid_in_use_p = NULL; 2409 return 0; // The remainder is empty so nothing more to deflate. 
2410 }
2411 // Lock mid so we can possibly deflate it:
2412 om_lock(mid);
2413 next = unmarked_next(mid);
2414 }
2415
2416 while (true) {
2417 // The current mid is locked at this point. If we have a
2418 // cur_mid_in_use, then it is also locked at this point.
2419
2420 if (next != NULL) {
2421 // We lock next so that an om_flush() thread that is behind us
2422 // cannot pass us when we unlock the current mid.
2423 om_lock(next);
2424 next_next = unmarked_next(next);
2425 }
2426
2427 // Only try to deflate if there is an associated Java object and if
2428 // mid is old (is not newly allocated and is not newly freed).
2429 if (mid->object() != NULL && mid->is_old() &&
2430 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2431 // Deflation succeeded and already updated free_head_p and
2432 // free_tail_p as needed. Finish the move to the local free list
2433 // by unlinking mid from the global or per-thread in-use list.
2434 if (cur_mid_in_use == NULL) {
2435 // mid is the list head and it is locked. Switch the list head
2436 // to next which is also locked (if not NULL) and also leave
2437 // mid locked:
2438 Atomic::store(list_p, next);
2439 } else {
2440 ObjectMonitor* locked_next = mark_om_ptr(next);
2441 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2442 // next field to locked_next and also leave mid locked:
2443 cur_mid_in_use->set_next_om(locked_next);
2444 }
2445 // At this point mid is disconnected from the in-use list so
2446 // its lock no longer has any effect on the in-use list.
2447 deflated_count++;
2448 Atomic::dec(count_p);
2449 // mid is current tail in the free_head_p list so NULL terminate it
2450 // (which also unlocks it):
2451 mid->set_next_om(NULL);
2452
2453 // All the list management is done so move on to the next one:
2454 mid = next; // mid keeps non-NULL next's locked state
2455 next = next_next;
2456 } else {
2457 // mid is considered in-use if it does not have an associated
2458 // Java object or mid is not old or deflation did not succeed.
2459 // A mid->is_new() node can be seen here when it is freshly
2460 // returned by om_alloc() (and skips the deflation code path).
2461 // A mid->is_old() node can be seen here when deflation failed.
2462 // A mid->is_free() node can be seen here when a fresh node from
2463 // om_alloc() is released by om_release() due to losing the race
2464 // in inflate().
2465
2466 // All the list management is done so move on to the next one:
2467 if (cur_mid_in_use != NULL) {
2468 om_unlock(cur_mid_in_use);
2469 }
2470 // The next cur_mid_in_use keeps mid's lock state so
2471 // that it is stable for a possible next field change. It
2472 // cannot be modified by om_release() while it is locked.
2473 cur_mid_in_use = mid;
2474 mid = next; // mid keeps non-NULL next's locked state
2475 next = next_next;
2476
2477 if (SafepointMechanism::should_block(self) &&
2478 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
2479 // If a safepoint has started and cur_mid_in_use is not the list
2480 // head and is old, then it is safe to use as saved state. Return
2481 // to the caller before blocking.
2482 *saved_mid_in_use_p = cur_mid_in_use;
2483 om_unlock(cur_mid_in_use);
2484 if (mid != NULL) {
2485 om_unlock(mid);
2486 }
2487 return deflated_count;
2488 }
2489 }
2490 if (mid == NULL) {
2491 if (cur_mid_in_use != NULL) {
2492 om_unlock(cur_mid_in_use);
2493 }
2494 break; // Reached end of the list so nothing more to deflate.
2495 }
2496
2497 // The current mid's next field is locked at this point.
If we have 2498 // a cur_mid_in_use, then it is also locked at this point. 2499 } 2500 // We finished the list without a safepoint starting so there's 2501 // no need to save state. 2502 *saved_mid_in_use_p = NULL; 2503 return deflated_count; 2504 } 2505 2506 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { 2507 counters->n_in_use = 0; // currently associated with objects 2508 counters->n_in_circulation = 0; // extant 2509 counters->n_scavenged = 0; // reclaimed (global and per-thread) 2510 counters->per_thread_scavenged = 0; // per-thread scavenge total 2511 counters->per_thread_times = 0.0; // per-thread scavenge times 2512 } 2513 2514 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { 2515 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2516 2517 if (AsyncDeflateIdleMonitors) { 2518 // Nothing to do when global idle ObjectMonitors are deflated using 2519 // a JavaThread unless a special deflation has been requested. 2520 if (!is_special_deflation_requested()) { 2521 return; 2522 } 2523 } 2524 2525 bool deflated = false; 2526 2527 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors 2528 ObjectMonitor* free_tail_p = NULL; 2529 elapsedTimer timer; 2530 2531 if (log_is_enabled(Info, monitorinflation)) { 2532 timer.start(); 2533 } 2534 2535 // Note: the thread-local monitors lists get deflated in 2536 // a separate pass. See deflate_thread_local_monitors(). 2537 2538 // For moribund threads, scan om_list_globals._in_use_list 2539 int deflated_count = 0; 2540 if (Atomic::load(&om_list_globals._in_use_list) != NULL) { 2541 // Update n_in_circulation before om_list_globals._in_use_count is 2542 // updated by deflation. 2543 Atomic::add(&counters->n_in_circulation, 2544 Atomic::load(&om_list_globals._in_use_count)); 2545 2546 deflated_count = deflate_monitor_list(&om_list_globals._in_use_list, 2547 &om_list_globals._in_use_count, 2548 &free_head_p, &free_tail_p); 2549 Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count)); 2550 } 2551 2552 if (free_head_p != NULL) { 2553 // Move the deflated ObjectMonitors back to the global free list. 
2554 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); 2555 #ifdef ASSERT 2556 ObjectMonitor* l_next_om = free_tail_p->next_om(); 2557 #endif 2558 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2559 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); 2560 Atomic::add(&counters->n_scavenged, deflated_count); 2561 } 2562 timer.stop(); 2563 2564 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2565 LogStreamHandle(Info, monitorinflation) lsh_info; 2566 LogStream* ls = NULL; 2567 if (log_is_enabled(Debug, monitorinflation)) { 2568 ls = &lsh_debug; 2569 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2570 ls = &lsh_info; 2571 } 2572 if (ls != NULL) { 2573 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 2574 } 2575 } 2576 2577 class HandshakeForDeflation : public HandshakeClosure { 2578 public: 2579 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {} 2580 2581 void do_thread(Thread* thread) { 2582 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread=" 2583 INTPTR_FORMAT, p2i(thread)); 2584 } 2585 }; 2586 2587 void ObjectSynchronizer::deflate_idle_monitors_using_JT() { 2588 assert(AsyncDeflateIdleMonitors, "sanity check"); 2589 2590 // Deflate any global idle monitors. 2591 deflate_global_idle_monitors_using_JT(); 2592 2593 int count = 0; 2594 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 2595 if (Atomic::load(&jt->om_in_use_count) > 0 && !jt->is_exiting()) { 2596 // This JavaThread is using ObjectMonitors so deflate any that 2597 // are idle unless this JavaThread is exiting; do not race with 2598 // ObjectSynchronizer::om_flush(). 2599 deflate_per_thread_idle_monitors_using_JT(jt); 2600 count++; 2601 } 2602 } 2603 if (count > 0) { 2604 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count); 2605 } 2606 2607 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, " 2608 "global_free_count=%d, global_wait_count=%d", 2609 Atomic::load(&om_list_globals._population), 2610 Atomic::load(&om_list_globals._in_use_count), 2611 Atomic::load(&om_list_globals._free_count), 2612 Atomic::load(&om_list_globals._wait_count)); 2613 2614 // The ServiceThread's async deflation request has been processed. 2615 set_is_async_deflation_requested(false); 2616 2617 if (Atomic::load(&om_list_globals._wait_count) > 0) { 2618 // There are deflated ObjectMonitors waiting for a handshake 2619 // (or a safepoint) for safety. 2620 2621 ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list); 2622 ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL"); 2623 int count = Atomic::load(&om_list_globals._wait_count); 2624 Atomic::store(&om_list_globals._wait_count, 0); 2625 Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL); 2626 2627 // Find the tail for prepend_list_to_common(). No need to mark 2628 // ObjectMonitors for this list walk since only the deflater 2629 // thread manages the wait list. 
2630 int l_count = 0; 2631 ObjectMonitor* tail = NULL; 2632 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) { 2633 tail = n; 2634 l_count++; 2635 } 2636 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count); 2637 2638 // Will execute a safepoint if !ThreadLocalHandshakes: 2639 HandshakeForDeflation hfd_hc; 2640 Handshake::execute(&hfd_hc); 2641 2642 prepend_list_to_common(list, tail, count, &om_list_globals._free_list, 2643 &om_list_globals._free_count); 2644 2645 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count); 2646 } 2647 } 2648 2649 // Deflate global idle ObjectMonitors using a JavaThread. 2650 // 2651 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() { 2652 assert(AsyncDeflateIdleMonitors, "sanity check"); 2653 assert(Thread::current()->is_Java_thread(), "precondition"); 2654 JavaThread* self = JavaThread::current(); 2655 2656 deflate_common_idle_monitors_using_JT(true /* is_global */, self); 2657 } 2658 2659 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread. 2660 // 2661 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) { 2662 assert(AsyncDeflateIdleMonitors, "sanity check"); 2663 assert(Thread::current()->is_Java_thread(), "precondition"); 2664 2665 deflate_common_idle_monitors_using_JT(false /* !is_global */, target); 2666 } 2667 2668 // Deflate global or per-thread idle ObjectMonitors using a JavaThread. 2669 // 2670 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) { 2671 JavaThread* self = JavaThread::current(); 2672 2673 int deflated_count = 0; 2674 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors 2675 ObjectMonitor* free_tail_p = NULL; 2676 ObjectMonitor* saved_mid_in_use_p = NULL; 2677 elapsedTimer timer; 2678 2679 if (log_is_enabled(Info, monitorinflation)) { 2680 timer.start(); 2681 } 2682 2683 if (is_global) { 2684 OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count))); 2685 } else { 2686 OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count))); 2687 } 2688 2689 do { 2690 int local_deflated_count; 2691 if (is_global) { 2692 local_deflated_count = 2693 deflate_monitor_list_using_JT(&om_list_globals._in_use_list, 2694 &om_list_globals._in_use_count, 2695 &free_head_p, &free_tail_p, 2696 &saved_mid_in_use_p); 2697 } else { 2698 local_deflated_count = 2699 deflate_monitor_list_using_JT(&target->om_in_use_list, 2700 &target->om_in_use_count, &free_head_p, 2701 &free_tail_p, &saved_mid_in_use_p); 2702 } 2703 deflated_count += local_deflated_count; 2704 2705 if (free_head_p != NULL) { 2706 // Move the deflated ObjectMonitors to the global free list. 2707 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count); 2708 // Note: The target thread can be doing an om_alloc() that 2709 // is trying to prepend an ObjectMonitor on its in-use list 2710 // at the same time that we have deflated the current in-use 2711 // list head and put it on the local free list. prepend_to_common() 2712 // will detect the race and retry which avoids list corruption, 2713 // but the next field in free_tail_p can flicker to marked 2714 // and then unmarked while prepend_to_common() is sorting it 2715 // all out. 
2716 #ifdef ASSERT 2717 ObjectMonitor* l_next_om = unmarked_next(free_tail_p); 2718 #endif 2719 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2720 2721 prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count); 2722 2723 OM_PERFDATA_OP(Deflations, inc(local_deflated_count)); 2724 } 2725 2726 if (saved_mid_in_use_p != NULL) { 2727 // deflate_monitor_list_using_JT() detected a safepoint starting. 2728 timer.stop(); 2729 { 2730 if (is_global) { 2731 log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint."); 2732 } else { 2733 log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target)); 2734 } 2735 assert(SafepointMechanism::should_block(self), "sanity check"); 2736 ThreadBlockInVM blocker(self); 2737 } 2738 // Prepare for another loop after the safepoint. 2739 free_head_p = NULL; 2740 free_tail_p = NULL; 2741 if (log_is_enabled(Info, monitorinflation)) { 2742 timer.start(); 2743 } 2744 } 2745 } while (saved_mid_in_use_p != NULL); 2746 timer.stop(); 2747 2748 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2749 LogStreamHandle(Info, monitorinflation) lsh_info; 2750 LogStream* ls = NULL; 2751 if (log_is_enabled(Debug, monitorinflation)) { 2752 ls = &lsh_debug; 2753 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2754 ls = &lsh_info; 2755 } 2756 if (ls != NULL) { 2757 if (is_global) { 2758 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 2759 } else { 2760 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count); 2761 } 2762 } 2763 } 2764 2765 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { 2766 // Report the cumulative time for deflating each thread's idle 2767 // monitors. Note: if the work is split among more than one 2768 // worker thread, then the reported time will likely be more 2769 // than a beginning to end measurement of the phase. 2770 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged); 2771 2772 bool needs_special_deflation = is_special_deflation_requested(); 2773 if (AsyncDeflateIdleMonitors && !needs_special_deflation) { 2774 // Nothing to do when idle ObjectMonitors are deflated using 2775 // a JavaThread unless a special deflation has been requested. 2776 return; 2777 } 2778 2779 if (log_is_enabled(Debug, monitorinflation)) { 2780 // exit_globals()'s call to audit_and_print_stats() is done 2781 // at the Info level and not at a safepoint. 2782 // For async deflation, audit_and_print_stats() is called in 2783 // ObjectSynchronizer::do_safepoint_work() at the Debug level 2784 // at a safepoint. 
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
                               "global_free_count=%d, global_wait_count=%d",
                               Atomic::load(&om_list_globals._population),
                               Atomic::load(&om_list_globals._in_use_count),
                               Atomic::load(&om_list_globals._free_count),
                               Atomic::load(&om_list_globals._wait_count));
  }

  Atomic::store(&_forceMonitorScavenge, 0);  // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));

  GVars.stw_random = os::random();
  GVars.stw_cycle++;

  if (needs_special_deflation) {
    set_is_special_deflation_requested(false);  // special deflation is done
  }
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
    // Nothing to do if a special deflation has NOT been requested.
    return;
  }

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Update n_in_circulation before om_in_use_count is updated by deflation.
  Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));

  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
  Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
#ifdef ASSERT
    ObjectMonitor* l_next_om = free_tail_p->next_om();
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
    Atomic::add(&counters->per_thread_scavenged, deflated_count);
  }

  timer.stop();
  counters->per_thread_times += timer.seconds();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
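// For reference, a MonitorClosure only needs to implement do_monitor().
// A hypothetical closure that merely counts the monitors owned by a given
// thread (an illustrative sketch, not used by the VM) could look like:
//
//   class CountOwnedMonitorsClosure : public MonitorClosure {
//     Thread* _thread;
//     int _count;
//    public:
//     CountOwnedMonitorsClosure(Thread* thread) : _thread(thread), _count(0) {}
//     void do_monitor(ObjectMonitor* mid) {
//       if (mid->owner() == _thread) {
//         _count++;
//       }
//     }
//     int count() const { return _count; }
//   };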
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   int tmp = SafepointSynchronize::_safepoint_counter;
//   <code that must not run at safepoint>
//   guarantee(((tmp ^ _safepoint_counter) | (tmp & 1)) == 0);
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  ObjectSynchronizer::monitors_iterate(&rjmc);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal: return "VM Internal";
    case inflate_cause_monitor_enter: return "Monitor Enter";
    case inflate_cause_wait: return "Monitor Wait";
    case inflate_cause_notify: return "Monitor Notify";
    case inflate_cause_hash_code: return "Monitor Hash Code";
    case inflate_cause_jni_enter: return "JNI Monitor Enter";
    case inflate_cause_jni_exit: return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// This function can be called at a safepoint or it can be called when
// we are trying to exit the VM. When we are trying to exit the VM, the
// list walker functions can run in parallel with the other list
// operations so spin-locking is used for safety.
//
// Calls to this function can be added in various places as a debugging
// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
// details logged at the Info level and 'false' for the 'on_exit'
// parameter to have in-use monitor details logged at the Trace level.
// deflate_monitor_list() no longer uses spin-locking so be careful
// when adding audit_and_print_stats() calls at a safepoint.
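// For example, a debugging patch might add (illustrative placement; this
// mirrors the existing call in finish_deflate_idle_monitors()):
//   ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);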
//
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check om_list_globals._population:
  if (Atomic::load(&om_list_globals._population) == chk_om_population) {
    ls->print_cr("global_population=%d equals chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // log_monitor_list_counts() to return a value that doesn't match
    // om_list_globals._population. So far a higher value has been
    // seen in testing, so something is being double counted by
    // log_monitor_list_counts().
    ls->print_cr("WARNING: global_population=%d is not equal to "
                 "chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  }

  // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check om_list_globals._free_list and om_list_globals._free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  // Check om_list_globals._wait_list and om_list_globals._wait_count:
  chk_global_wait_list_and_count(ls, &error_cnt);

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting, this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    } else if (!AsyncDeflateIdleMonitors) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Lock the next ObjectMonitor for traversal and unlock the current
// ObjectMonitor. Returns the next ObjectMonitor if there is one.
// Otherwise returns NULL (after unlocking the current ObjectMonitor).
// This function is used by the various list walker functions to
// safely walk a list without allowing an ObjectMonitor to be moved
// to another list in the middle of a walk.
static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
  assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
  ObjectMonitor* next = unmarked_next(cur);
  if (next == NULL) {  // Reached the end of the list.
    om_unlock(cur);
    return NULL;
  }
  om_lock(next);   // Lock next before unlocking current to keep
  om_unlock(cur);  // from being by-passed by another thread.
  return next;
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._free_list)) != NULL) {
    // Marked the global free list head so process the list.
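    // Hand-over-hand walk: each pass examines the locked 'cur' entry and
    // then lock_next_for_traversal() locks the successor before unlocking
    // 'cur', so no entry can be moved to another list mid-walk.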
    while (true) {
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  if (l_free_count == chk_om_free_count) {
    out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
                  l_free_count, chk_om_free_count);
  } else {
    // With fine-grained locks on om_list_globals._free_list, it
    // is possible for an ObjectMonitor to be prepended to
    // om_list_globals._free_list after we started calculating
    // chk_om_free_count so om_list_globals._free_count may not
    // match anymore.
    out->print_cr("WARNING: global_free_count=%d is not equal to "
                  "chk_om_free_count=%d", l_free_count, chk_om_free_count);
  }
}

// Check the global wait list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_wait_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._wait_list)) != NULL) {
    // Marked the global wait list head so process the list.
    while (true) {
      // Rules for om_list_globals._wait_list are the same as for
      // om_list_globals._free_list:
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_wait_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  if (Atomic::load(&om_list_globals._wait_count) == chk_om_wait_count) {
    out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
  } else {
    out->print_cr("ERROR: global_wait_count=%d is not equal to "
                  "chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
    // Marked the global in-use list head so process the list.
    while (true) {
      chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  if (l_in_use_count == chk_om_in_use_count) {
    out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // an exiting JavaThread to put its in-use ObjectMonitors on the
    // global in-use list after chk_om_in_use_count is calculated above.
    out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
    // Marked the per-thread free list head so process the list.
    while (true) {
      chk_free_entry(jt, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_free_count = Atomic::load(&jt->om_free_count);
  if (l_om_free_count == chk_om_free_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
                  "chk_om_free_count=%d", p2i(jt), l_om_free_count, chk_om_free_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
                  "equal to chk_om_free_count=%d", p2i(jt), l_om_free_count,
                  chk_om_free_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
    // Marked the per-thread in-use list head so process the list.
    while (true) {
      chk_in_use_entry(jt, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
  if (l_om_in_use_count == chk_om_in_use_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                  "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                  "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
  stringStream ss;
  if (Atomic::load(&om_list_globals._in_use_count) > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
      // Marked the global in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(cur),
                   cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
                   p2i(obj), obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %18s %18s",
                "jt", "monitor", "BHL", "object", "object type");
  out->print_cr("================== ================== === ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
      // Marked the per-thread in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
                   " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
                   mark.hash() != 0, cur->owner() != NULL, p2i(obj),
                   obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int pop_count = 0;
  out->print_cr("%18s %10s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Wait", "Total");
  out->print_cr("================== ========== ========== ========== ==========");
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  int l_wait_count = Atomic::load(&om_list_globals._wait_count);
  out->print_cr("%18s %10d %10d %10d %10d", "", l_in_use_count,
                l_free_count, l_wait_count,
                Atomic::load(&om_list_globals._population));
  pop_count += l_in_use_count + l_free_count + l_wait_count;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
    int l_om_free_count = Atomic::load(&jt->om_free_count);
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  l_om_in_use_count, l_om_free_count, jt->om_free_provision);
    pop_count += l_om_in_use_count + l_om_free_count;
  }
  return pop_count;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
  return 0;
}

#endif
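
// Illustrative note on the containment test in verify_objmon_isinpool()
// (not VM code): for a block at address B with S == sizeof(PaddedObjectMonitor),
// a monitor at address M passes when B < M < B + _BLOCKSIZE * S and
// (M - B) % S == 0, i.e. M is one of the _BLOCKSIZE - 1 usable monitors
// that follow the block-header monitor (the CHAINMARKER entry) at B itself.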