/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
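//
// Usage sketch (hedged; these are the call shapes that appear later in
// this file, e.g. in ObjectSynchronizer::wait()): when DTraceMonitorProbes
// is false the macros below expand to an empty block, so the argument
// setup in DTRACE_MONITOR_PROBE_COMMON is only paid when tracing:
//   DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
//   DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);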

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

struct ObjectMonitorListGlobals {
  char         _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared list related variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.

  // Global ObjectMonitor free list. Newly allocated and deflated
  // ObjectMonitors are prepended here.
  ObjectMonitor* _free_list;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* _in_use_list;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
  // is true, deflated ObjectMonitors wait on this list until after a
  // handshake or a safepoint for platforms that don't support handshakes.
  // After the handshake or safepoint, the deflated ObjectMonitors are
  // prepended to free_list.
  ObjectMonitor* _wait_list;
  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  int _free_count;    // # on free_list
  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));

  int _in_use_count;  // # on in_use_list
  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));

  int _population;    // # Extant -- in circulation
  DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));

  int _wait_count;    // # on wait_list
  DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
};
static ObjectMonitorListGlobals om_list_globals;

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Spin-lock functions

// ObjectMonitors are not lockable outside of this file. We use spin-locks
// implemented using a bit in the _next_om field instead of the heavier
// weight locking mechanisms for faster list management.

#define OM_LOCK_BIT 0x1

// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
static bool is_locked(ObjectMonitor* om) {
  return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
}

// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}

// Return the unmarked next field in an ObjectMonitor. Note: the next
// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
}

// Try to lock an ObjectMonitor. Returns true if locking was successful.
// Otherwise returns false.
static bool try_om_lock(ObjectMonitor* om) {
  // Get current next field without any OM_LOCK_BIT value.
  ObjectMonitor* next = unmarked_next(om);
  if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
    return false;  // Cannot lock the ObjectMonitor.
  }
  return true;
}

// Lock an ObjectMonitor.
static void om_lock(ObjectMonitor* om) {
  while (true) {
    if (try_om_lock(om)) {
      return;
    }
  }
}

// Unlock an ObjectMonitor.
static void om_unlock(ObjectMonitor* om) {
  ObjectMonitor* next = om->next_om();
  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
  om->set_next_om(next);
}

// Get the list head after locking it. Returns the list head or NULL
// if the list is empty.
static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
  while (true) {
    ObjectMonitor* mid = Atomic::load(list_p);
    if (mid == NULL) {
      return NULL;  // The list is empty.
    }
    if (try_om_lock(mid)) {
      if (Atomic::load(list_p) != mid) {
        // The list head changed before we could lock it so we have to retry.
        om_unlock(mid);
        continue;
      }
      return mid;
    }
  }
}

#undef OM_LOCK_BIT


// =====================> List Management functions

// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p.
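// A sketch of the protocol implemented below (descriptive only): first
// spin-lock 'tail' so a concurrent list walker cannot advance past the
// splice point, then link tail to the current head. If *list_p is empty,
// a bare cmpxchg() publishes the new list; otherwise the old head is
// spin-locked first, which serializes the swing of *list_p against
// other prependers and takers.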
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                   int count, ObjectMonitor** list_p,
                                   int* count_p) {
  while (true) {
    ObjectMonitor* cur = Atomic::load(list_p);
    // Prepend list to *list_p.
    if (!try_om_lock(tail)) {
      // Failed to lock tail due to a list walker so try it all again.
      continue;
    }
    tail->set_next_om(cur);  // tail now points to cur (and unlocks tail)
    if (cur == NULL) {
      // No potential race with takers or other prependers since
      // *list_p is empty.
      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
        // Successfully switched *list_p to the list value.
        Atomic::add(count_p, count);
        break;
      }
      // Implied else: try it all again
    } else {
      if (!try_om_lock(cur)) {
        continue;  // failed to lock cur so try it all again
      }
      // We locked cur so try to switch *list_p to the list value.
      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
        // The list head has changed so unlock cur and try again:
        om_unlock(cur);
        continue;
      }
      Atomic::add(count_p, count);
      om_unlock(cur);
      break;
    }
  }
}

// Prepend a newly allocated block of ObjectMonitors to g_block_list and
// om_list_globals._free_list. Also updates om_list_globals._population
// and om_list_globals._free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
  // First we handle g_block_list:
  while (true) {
    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
    // Prepend new_blk to g_block_list. The first ObjectMonitor in
    // a block is reserved for use as linkage to the next block.
    new_blk[0].set_next_om(cur);
    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
      // Successfully switched g_block_list to the new_blk value.
      Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
      break;
    }
    // Implied else: try it all again
  }

  // Second we handle om_list_globals._free_list:
  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
                         &om_list_globals._free_list, &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._free_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._free_count.
static void prepend_list_to_global_free_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
                         &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._wait_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._wait_count.
static void prepend_list_to_global_wait_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  assert(HandshakeAfterDeflateIdleMonitors, "sanity check");
  prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
                         &om_list_globals._wait_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._in_use_count.
static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
                                               ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
                         &om_list_globals._in_use_count);
}

// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter.
static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
                              int* count_p) {
  while (true) {
    om_lock(m);  // Lock m so we can safely update its next field.
    ObjectMonitor* cur = NULL;
    // Lock the list head to guard against races with a list walker
    // or async deflater thread (which only races in om_in_use_list):
    if ((cur = get_list_head_locked(list_p)) != NULL) {
      // List head is now locked so we can safely switch it.
      m->set_next_om(cur);  // m now points to cur (and unlocks m)
      Atomic::store(list_p, m);  // Switch list head to unlocked m.
      om_unlock(cur);
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
    m->set_next_om(cur);  // m now points to NULL (and unlocks m)
    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
      // List head is now unlocked m.
      break;
    }
    // Implied else: try it all again
  }
  Atomic::inc(count_p);
}

// Prepend an ObjectMonitor to a per-thread om_free_list.
// Also updates the per-thread om_free_count.
static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
}

// Prepend an ObjectMonitor to a per-thread om_in_use_list.
// Also updates the per-thread om_in_use_count.
static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}

// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
                                                int* count_p) {
  ObjectMonitor* take = NULL;
  // Lock the list head to guard against races with a list walker
  // or async deflater thread (which only races in om_list_globals._free_list):
  if ((take = get_list_head_locked(list_p)) == NULL) {
    return NULL;  // None are available.
  }
  ObjectMonitor* next = unmarked_next(take);
  // Switch locked list head to next (which unlocks the list head, but
  // leaves take locked):
  Atomic::store(list_p, next);
  Atomic::dec(count_p);
  // Unlock take, but leave the next value for any lagging list
  // walkers. It will get cleaned up when take is prepended to
  // the in-use list:
  om_unlock(take);
  return take;
}

// Take an ObjectMonitor from the start of the om_list_globals._free_list.
// Also updates om_list_globals._free_count. Returns NULL if none are
// available.
static ObjectMonitor* take_from_start_of_global_free_list() {
  return take_from_start_of_common(&om_list_globals._free_list,
                                   &om_list_globals._free_count);
}

// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
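// Note (descriptive): as in take_from_start_of_common() above, the
// returned ObjectMonitor is unlocked but its _next_om field still points
// at the old list so lagging list walkers are not broken; the stale value
// is overwritten when the monitor is prepended to an in-use list.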
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
}


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.
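//
// A sketch of the cases satisfied below (descriptive only): quick_enter()
// succeeds for an inflated monitor when
// -- the caller already owns the monitor (recursive enter), or
// -- the owner field is NULL and an uncontended CAS installs self, or
// -- with AsyncDeflateIdleMonitors, the owner is DEFLATER_MARKER and the
//    CAS from DEFLATER_MARKER to self beats the deflater's protocol.
// Everything else (neutral, biased or stack-locked headers, contention)
// returns false and takes the slow-path ObjectSynchronizer::enter().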

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  while (true) {
    const markWord mark = obj->mark();

    if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* const m = omh.om_ptr();
      assert(m->object() == obj, "invariant");
      Thread* const owner = (Thread *) m->_owner;

      // Lock contention and Transactional Lock Elision (TLE) diagnostics
      // and observability
      // Case: light contention possibly amenable to TLE
      // Case: TLE inimical operations such as nested/recursive synchronization

      if (owner == self) {
        m->_recursions++;
        return true;
      }

      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-NULL so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // Biased Locking in the object's header, the second check is for
      // stack-locking in the object's header, the third check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markWord::unused_mark());

      if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
        assert(m->_recursions == 0, "invariant");
        return true;
      }

      if (AsyncDeflateIdleMonitors &&
          m->try_set_owner_from(DEFLATER_MARKER, self) == DEFLATER_MARKER) {
        // The deflation protocol finished the first part (setting owner),
        // but it failed the second part (making ref_count negative) and
        // bailed. Or the ObjectMonitor was async deflated and reused.
        // Acquired the monitor.
        assert(m->_recursions == 0, "invariant");
        return true;
      }
    }
    break;
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
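//
// A sketch of the stack-lock fast path implemented below (descriptive;
// the real code also handles the biased and recursive cases):
//   markWord mark = obj->mark();                  // neutral header
//   lock->set_displaced_header(mark);             // save it in the BasicLock
//   if (obj->cas_set_mark(markWord::from_pointer(lock), mark) == mark) {
//     return;                                     // now stack-locked
//   }
//   ... fall through to inflate() and ObjectMonitor::enter() ...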

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
  omh.om_ptr()->enter(THREAD);
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, object, inflate_cause_vm_internal);
  omh.om_ptr()->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  omh.om_ptr()->reenter(recursions, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
  omh.om_ptr()->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
  ObjectMonitor* monitor = omh.om_ptr();
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  ObjectMonitor* monitor = omh.om_ptr();

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  omh.om_ptr()->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char         _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int _forceMonitorScavenge = 0;  // Scavenge required and pending

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;  // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;  // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();  // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
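    // The update below is the classic 128-bit Marsaglia xorshift
    // (x, y, z, w) -> (y, z, w, w'), where t = x ^ (x << 11) and
    // w' = (w ^ (w >> 19)) ^ (t ^ (t >> 8)); for a non-zero seed the
    // sequence has period 2^128 - 1.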
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);

    // object should remain ineligible for biased locking
    assert(!mark.has_bias_pattern(), "invariant");

    if (mark.is_neutral()) {            // if this is a normal header
      hash = mark.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
                                        // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {               // if the hash was installed, return it
        return hash;
      }
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      monitor = omh.om_ptr();
      temp = monitor->header();
      // Allow for a lagging install_displaced_markword_in_object() to
      // have marked the ObjectMonitor's header/dmw field.
      assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()),
             "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    } else if (self->is_lock_owned((address)mark.locker())) {
      // This is a stack lock owned by the calling thread so fetch the
      // displaced markWord from the BasicLock on the stack.
      temp = mark.displaced_mark_helper();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // WARNING:
      // The displaced header in the BasicLock on a thread's stack
      // is strictly immutable. It CANNOT be changed in ANY cases.
      // So we have to inflate the stack lock into an ObjectMonitor
      // even if the current thread owns the lock. The BasicLock on
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.
    ObjectMonitorHandle omh;
    inflate(&omh, self, obj, inflate_cause_hash_code);
    monitor = omh.om_ptr();
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    // Allow for a lagging install_displaced_markword_in_object() to
    // have marked the ObjectMonitor's header/dmw field.
    assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()),
           "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                    // if it does not have a hash
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
      if (AsyncDeflateIdleMonitors && temp.is_marked()) {
        // A lagging install_displaced_markword_in_object() has marked
        // the ObjectMonitor's header/dmw field. We clear it to avoid
        // any confusion if we are able to set the hash.
        temp.set_unmarked();
      }
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg(). With async
        // deflation, a lagging install_displaced_markword_in_object()
        // could have just marked or just unmarked the header/dmw field.
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        if (AsyncDeflateIdleMonitors) {
          // Since async deflation gives us two possible reasons for
          // the cmpxchg() to fail, it is easier to simply retry.
          continue;
        }
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
    }
    // We finally get the hash.
    return hash;
  }
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markWord mark = read_stable_mark(obj);

    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      return thread->is_lock_owned((address)mark.locker());
    }
    // Contended case, header points to ObjectMonitor (tagged pointer)
    if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
      return ret_code;
    }
    // Unlocked case, header in place
    assert(mark.is_neutral(), "sanity check");
    return false;
  }
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markWord mark = read_stable_mark(obj);

    // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
    if (mark.has_locker()) {
      return self->is_lock_owned((address)mark.locker()) ?
        owner_self : owner_other;
    }

    // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
    // The Object:ObjectMonitor relationship is stable as long as we're
    // not at a safepoint and AsyncDeflateIdleMonitors is false.
    if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      void* owner = monitor->_owner;
      if (owner == NULL) return owner_none;
      return (owner == self ||
              self->is_lock_owned((address)owner)) ?
             owner_self : owner_other;
    }

    // CASE: neutral
    assert(mark.is_neutral(), "sanity check");
    return owner_none;           // it's unlocked
  }
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();

  while (true) {
    address owner = NULL;
    markWord mark = read_stable_mark(obj);

    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      owner = (address) mark.locker();
    }

    // Contended case, header points to ObjectMonitor (tagged pointer)
    else if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      assert(monitor != NULL, "monitor should be non-null");
      owner = (address) monitor->owner();
    }

    if (owner != NULL) {
      // owning_thread_from_monitor_owner() may also return NULL here
      return Threads::owning_thread_from_monitor_owner(t_list, owner);
    }

    // Unlocked case, header in place
    // Cannot have assertion since this object may have been
    // locked by another thread when reaching here.
    // assert(mark.is_neutral(), "sanity check");

    return NULL;
  }
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      ObjectMonitorHandle omh;
      if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
        // The ObjectMonitor* is not free and it has been made safe.
        if (mid->object() == NULL) {
          // Only process with closure if the object is set.
          continue;
        }
        closure->do_monitor(mid);
      }
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
}

static bool monitors_used_above_threshold() {
  int population = Atomic::load(&om_list_globals._population);
  if (population == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = population - Atomic::load(&om_list_globals._free_count);
    if (HandshakeAfterDeflateIdleMonitors) {
      monitors_used -= Atomic::load(&om_list_globals._wait_count);
    }
    int monitor_usage = (monitors_used * 100LL) / population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}

// Returns true if MonitorBound is set (> 0) and if the specified
// cnt is > MonitorBound. Otherwise returns false.
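// Worked example (illustrative numbers only): with -XX:MonitorBound=4096
// and cnt == 5000 this returns true. Similarly, in
// monitors_used_above_threshold() above, _population == 10000,
// _free_count == 1500 and _wait_count == 500 give a usage of
// (8000 * 100) / 10000 == 80, so deflation is signaled only when
// MonitorUsedDeflationThreshold is below 80.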
static bool is_MonitorBound_exceeded(const int cnt) {
  const int mx = MonitorBound;
  return mx > 0 && cnt > mx;
}

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
  int monitors_used = Atomic::load(&om_list_globals._population) -
                      Atomic::load(&om_list_globals._free_count);
  if (HandshakeAfterDeflateIdleMonitors) {
    monitors_used -= Atomic::load(&om_list_globals._wait_count);
  }
  if (is_MonitorBound_exceeded(monitors_used)) {
    // Not enough ObjectMonitors on the global free list.
    return true;
  }
  return false;
}

bool ObjectSynchronizer::needs_monitor_scavenge() {
  if (Atomic::load(&_forceMonitorScavenge) == 1) {
    log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
    return true;
  }
  return false;
}

bool ObjectSynchronizer::is_safepoint_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    if (monitors_used_above_threshold()) {
      // Too many monitors in use.
      return true;
    }
    return needs_monitor_scavenge();
  }
  if (is_special_deflation_requested()) {
    // For AsyncDeflateIdleMonitors only do a safepoint deflation
    // if there is a special deflation request.
    return true;
  }
  return false;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to lock ObjectMonitors for the list traversal and
  // no need to update the ObjectMonitor's ref_count for this
  // ObjectMonitor* use.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from om_list_globals._free_list or a per-thread
// free list and associates them with objects. Deflation -- which occurs at
// STW-time or asynchronously -- disassociates idle monitors from objects.
// Such scavenged monitors are returned to the om_list_globals._free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the om_list_globals._free_list
// --   unassigned and on a per-thread free list
// --   assigned to an object. The object is inflated and the mark refers
//      to the ObjectMonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
//
// When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// When safepoint deflation is being used and MonitorBound is set, the
// boundary applies to
//   (om_list_globals._population - om_list_globals._free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.
//
// When async deflation is being used:
// The monitor pool is still grow-only. Async deflation is requested
// by a safepoint's cleanup phase or by the ServiceThread at periodic
// intervals when is_async_deflation_needed() returns true. In
// addition to other policies that are checked, if there are not
// enough ObjectMonitors on the global free list, then
// is_async_deflation_needed() will return true. The ServiceThread
// calls deflate_global_idle_monitors_using_JT() and also calls
// deflate_per_thread_idle_monitors_using_JT() as needed.

static void InduceScavenge(Thread* self, const char * Whence) {
  assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");

  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger a cleanup safepoint as the number
  // of active monitors passes the specified threshold.
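  // Note (descriptive): only the first caller to flip _forceMonitorScavenge
  // from 0 to 1 via the Atomic::xchg() below pokes the VMThread;
  // needs_monitor_scavenge() reports true while the flag stays set.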
1452 // TODO: assert thread state is reasonable 1453 1454 if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) { 1455 VMThread::check_for_forced_cleanup(); 1456 } 1457 } 1458 1459 ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) { 1460 // A large MAXPRIVATE value reduces both list lock contention 1461 // and list coherency traffic, but also tends to increase the 1462 // number of ObjectMonitors in circulation as well as the STW 1463 // scavenge costs. As usual, we lean toward time in space-time 1464 // tradeoffs. 1465 const int MAXPRIVATE = 1024; 1466 NoSafepointVerifier nsv; 1467 1468 stringStream ss; 1469 for (;;) { 1470 ObjectMonitor* m; 1471 1472 // 1: try to allocate from the thread's local om_free_list. 1473 // Threads will attempt to allocate first from their local list, then 1474 // from the global list, and only after those attempts fail will the 1475 // thread attempt to instantiate new monitors. Thread-local free lists 1476 // improve allocation latency, as well as reducing coherency traffic 1477 // on the shared global list. 1478 m = take_from_start_of_om_free_list(self); 1479 if (m != NULL) { 1480 guarantee(m->object() == NULL, "invariant"); 1481 m->set_allocation_state(ObjectMonitor::New); 1482 prepend_to_om_in_use_list(self, m); 1483 return m; 1484 } 1485 1486 // 2: try to allocate from the global om_list_globals._free_list 1487 // If we're using thread-local free lists then try 1488 // to reprovision the caller's free list. 1489 if (Atomic::load(&om_list_globals._free_list) != NULL) { 1490 // Reprovision the thread's om_free_list. 1491 // Use bulk transfers to reduce the allocation rate and heat 1492 // on various locks. 1493 for (int i = self->om_free_provision; --i >= 0;) { 1494 ObjectMonitor* take = take_from_start_of_global_free_list(); 1495 if (take == NULL) { 1496 break; // No more are available. 1497 } 1498 guarantee(take->object() == NULL, "invariant"); 1499 if (AsyncDeflateIdleMonitors) { 1500 // We allowed 3 field values to linger during async deflation. 1501 // We clear header and restore ref_count here, but we leave 1502 // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor 1503 // enter optimization can no longer race with async deflation 1504 // and reuse. 1505 take->set_header(markWord::zero()); 1506 if (take->ref_count() < 0) { 1507 // Add back max_jint to restore the ref_count field to its 1508 // proper value. 1509 Atomic::add(&take->_ref_count, max_jint); 1510 1511 #ifdef ASSERT 1512 jint l_ref_count = take->ref_count(); 1513 #endif 1514 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d", 1515 l_ref_count, take->ref_count()); 1516 } 1517 } 1518 take->Recycle(); 1519 // Since we're taking from the global free-list, take must be Free. 1520 // om_release() also sets the allocation state to Free because it 1521 // is called from other code paths. 1522 assert(take->is_free(), "invariant"); 1523 om_release(self, take, false); 1524 } 1525 self->om_free_provision += 1 + (self->om_free_provision / 2); 1526 if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE; 1527 1528 if (!AsyncDeflateIdleMonitors && 1529 is_MonitorBound_exceeded(Atomic::load(&om_list_globals._population) - 1530 Atomic::load(&om_list_globals._free_count))) { 1531 // Not enough ObjectMonitors on the global free list. 1532 // We can't safely induce a STW safepoint from om_alloc() as our thread 1533 // state may not be appropriate for such activities and callers may hold 1534 // naked oops, so instead we defer the action. 
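// Note: InduceScavenge() is cheap to call on this path -- it just
// publishes the request with a single xchg of _forceMonitorScavenge and
// pokes the VMThread. The deflation itself happens later, at the
// induced cleanup safepoint.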
1535 InduceScavenge(self, "om_alloc");
1536 }
1537 continue;
1538 }
1539
1540 // 3: allocate a block of new ObjectMonitors
1541 // Both the local and global free lists are empty -- resort to malloc().
1542 // In the current implementation ObjectMonitors are TSM - immortal.
1543 // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1544 // each ObjectMonitor to start at the beginning of a cache line,
1545 // so we use align_up().
1546 // A better solution would be to use C++ placement-new.
1547 // BEWARE: As it stands currently, we don't run the ctors!
1548 assert(_BLOCKSIZE > 1, "invariant");
1549 size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1550 PaddedObjectMonitor* temp;
1551 size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1552 void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1553 temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1554 (void)memset((void *) temp, 0, neededsize);
1555
1556 // Format the block.
1557 // Initialize the linked list; each monitor points to its next,
1558 // forming the singly linked free list. The very first monitor
1559 // will point to the next block, which forms the block list.
1560 // The trick of using the 1st element in the block as g_block_list
1561 // linkage should be reconsidered. A better implementation would
1562 // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1563
1564 for (int i = 1; i < _BLOCKSIZE; i++) {
1565 temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
1566 assert(temp[i].is_free(), "invariant");
1567 }
1568
1569 // terminate the last monitor as the end of list
1570 temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1571
1572 // Element [0] is reserved for global list linkage
1573 temp[0].set_object(CHAINMARKER);
1574
1575 // Consider carving out this thread's current request from the
1576 // block in hand. This avoids some lock traffic and redundant
1577 // list activity.
1578
1579 prepend_block_to_lists(temp);
1580 }
1581 }
1582
1583 // Place "m" on the caller's private per-thread om_free_list.
1584 // In practice there's no need to clamp or limit the number of
1585 // monitors on a thread's om_free_list as the only non-allocation time
1586 // we'll call om_release() is to return a monitor to the free list after
1587 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1588 // accumulate on a thread's free list.
1589 //
1590 // Key constraint: all ObjectMonitors on a thread's free list and the global
1591 // free list must have their object field set to null. This prevents the
1592 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1593 // -- from reclaiming them while we are trying to release them.
1594
1595 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
1596 bool from_per_thread_alloc) {
1597 guarantee(m->header().value() == 0, "invariant");
1598 guarantee(m->object() == NULL, "invariant");
1599 NoSafepointVerifier nsv;
1600
1601 stringStream ss;
1602 guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
1603 "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
1604 m->_recursions);
1605 m->set_allocation_state(ObjectMonitor::Free);
1606 // _next_om is used for both per-thread in-use and free lists so
1607 // we have to remove 'm' from the in-use list first (as needed).
1608 if (from_per_thread_alloc) {
1609 // Need to remove 'm' from om_in_use_list.
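// Three cases are handled below: 'm' is the list head; 'm' is directly
// after the list head; or 'm' is further down the list and has to be
// found by a hand-over-hand locked walk.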
1610 ObjectMonitor* mid = NULL;
1611 ObjectMonitor* next = NULL;
1612
1613 // This list walk can race with another list walker or with async
1614 // deflation so we have to worry about an ObjectMonitor being
1615 // removed from this list while we are walking it.
1616
1617 // Lock the list head to avoid racing with another list walker
1618 // or with async deflation.
1619 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
1620 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
1621 }
1622 next = unmarked_next(mid);
1623 if (m == mid) {
1624 // First special case:
1625 // 'm' matches mid, is the list head and is locked. Switch the list
1626 // head to next which unlocks the list head, but leaves the extracted
1627 // mid locked:
1628 Atomic::store(&self->om_in_use_list, next);
1629 } else if (m == next) {
1630 // Second special case:
1631 // 'm' matches next after the list head and we already have the list
1632 // head locked so set mid to what we are extracting:
1633 mid = next;
1634 // Lock mid to prevent races with a list walker or an async
1635 // deflater thread that's ahead of us. The locked list head
1636 // prevents races from behind us.
1637 om_lock(mid);
1638 // Update next to what follows mid (if anything):
1639 next = unmarked_next(mid);
1640 // Switch next after the list head to new next which unlocks the
1641 // list head, but leaves the extracted mid locked:
1642 self->om_in_use_list->set_next_om(next);
1643 } else {
1644 // We have to search the list to find 'm'.
1645 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT
1646 " is too short.", p2i(self), p2i(self->om_in_use_list));
1647 // Our starting anchor is next after the list head which is the
1648 // last ObjectMonitor we checked:
1649 ObjectMonitor* anchor = next;
1650 // Lock anchor to prevent races with a list walker or an async
1651 // deflater thread that's ahead of us. The locked list head
1652 // prevents races from behind us.
1653 om_lock(anchor);
1654 om_unlock(mid); // Unlock the list head now that anchor is locked.
1655 while ((mid = unmarked_next(anchor)) != NULL) {
1656 if (m == mid) {
1657 // We found 'm' on the per-thread in-use list so extract it.
1658 // Update next to what follows mid (if anything):
1659 next = unmarked_next(mid);
1660 // Switch next after the anchor to new next which unlocks the
1661 // anchor, but leaves the extracted mid locked:
1662 anchor->set_next_om(next);
1663 break;
1664 } else {
1665 // Lock the next anchor to prevent races with a list walker
1666 // or an async deflater thread that's ahead of us. The locked
1667 // current anchor prevents races from behind us.
1668 om_lock(mid);
1669 // Unlock current anchor now that next anchor is locked:
1670 om_unlock(anchor);
1671 anchor = mid; // Advance to new anchor and try again.
1672 }
1673 }
1674 }
1675
1676 if (mid == NULL) {
1677 // Reached end of the list and didn't find 'm' so:
1678 fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1679 INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1680 }
1681
1682 // At this point mid is disconnected from the in-use list so
1683 // its lock no longer has any effect on the in-use list.
1684 Atomic::dec(&self->om_in_use_count);
1685 // Unlock mid, but leave the next value for any lagging list
1686 // walkers.
It will get cleaned up when mid is prepended to 1687 // the thread's free list: 1688 om_unlock(mid); 1689 } 1690 1691 prepend_to_om_free_list(self, m); 1692 guarantee(m->is_free(), "invariant"); 1693 } 1694 1695 // Return ObjectMonitors on a moribund thread's free and in-use 1696 // lists to the appropriate global lists. The ObjectMonitors on the 1697 // per-thread in-use list may still be in use by other threads. 1698 // 1699 // We currently call om_flush() from Threads::remove() before the 1700 // thread has been excised from the thread list and is no longer a 1701 // mutator. This means that om_flush() cannot run concurrently with 1702 // a safepoint and interleave with deflate_idle_monitors(). In 1703 // particular, this ensures that the thread's in-use monitors are 1704 // scanned by a GC safepoint, either via Thread::oops_do() (before 1705 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after 1706 // om_flush() is called). 1707 // 1708 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT() 1709 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can 1710 // run at the same time as om_flush() so we have to follow a careful 1711 // protocol to prevent list corruption. 1712 1713 void ObjectSynchronizer::om_flush(Thread* self) { 1714 // Process the per-thread in-use list first to be consistent. 1715 int in_use_count = 0; 1716 ObjectMonitor* in_use_list = NULL; 1717 ObjectMonitor* in_use_tail = NULL; 1718 NoSafepointVerifier nsv; 1719 1720 // This function can race with a list walker or with an async 1721 // deflater thread so we lock the list head to prevent confusion. 1722 // An async deflater thread checks to see if the target thread 1723 // is exiting, but if it has made it past that check before we 1724 // started exiting, then it is racing to get to the in-use list. 1725 if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) { 1726 // At this point, we have locked the in-use list head so a racing 1727 // thread cannot come in after us. However, a racing thread could 1728 // be ahead of us; we'll detect that and delay to let it finish. 1729 // 1730 // The thread is going away, however the ObjectMonitors on the 1731 // om_in_use_list may still be in-use by other threads. Link 1732 // them to in_use_tail, which will be linked into the global 1733 // in-use list (om_list_globals._in_use_list) below. 1734 // 1735 // Account for the in-use list head before the loop since it is 1736 // already locked (by this thread): 1737 in_use_tail = in_use_list; 1738 in_use_count++; 1739 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) { 1740 if (is_locked(cur_om)) { 1741 // cur_om is locked so there must be a racing walker or async 1742 // deflater thread ahead of us so we'll give it a chance to finish. 1743 while (is_locked(cur_om)) { 1744 os::naked_short_sleep(1); 1745 } 1746 // Refetch the possibly changed next field and try again. 1747 cur_om = unmarked_next(in_use_tail); 1748 continue; 1749 } 1750 if (cur_om->is_free()) { 1751 // cur_om was deflated and the allocation state was changed 1752 // to Free while it was locked. We happened to see it just 1753 // after it was unlocked (and added to the free list). 1754 // Refetch the possibly changed next field and try again. 
1755 cur_om = unmarked_next(in_use_tail); 1756 continue; 1757 } 1758 in_use_tail = cur_om; 1759 in_use_count++; 1760 cur_om = unmarked_next(cur_om); 1761 } 1762 guarantee(in_use_tail != NULL, "invariant"); 1763 int l_om_in_use_count = Atomic::load(&self->om_in_use_count); 1764 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: " 1765 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count); 1766 Atomic::store(&self->om_in_use_count, 0); 1767 // Clear the in-use list head (which also unlocks it): 1768 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL); 1769 om_unlock(in_use_list); 1770 } 1771 1772 int free_count = 0; 1773 ObjectMonitor* free_list = NULL; 1774 ObjectMonitor* free_tail = NULL; 1775 // This function can race with a list walker thread so we lock the 1776 // list head to prevent confusion. 1777 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) { 1778 // At this point, we have locked the free list head so a racing 1779 // thread cannot come in after us. However, a racing thread could 1780 // be ahead of us; we'll detect that and delay to let it finish. 1781 // 1782 // The thread is going away. Set 'free_tail' to the last per-thread free 1783 // monitor which will be linked to om_list_globals._free_list below. 1784 // 1785 // Account for the free list head before the loop since it is 1786 // already locked (by this thread): 1787 free_tail = free_list; 1788 free_count++; 1789 for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) { 1790 if (is_locked(s)) { 1791 // s is locked so there must be a racing walker thread ahead 1792 // of us so we'll give it a chance to finish. 1793 while (is_locked(s)) { 1794 os::naked_short_sleep(1); 1795 } 1796 } 1797 free_tail = s; 1798 free_count++; 1799 guarantee(s->object() == NULL, "invariant"); 1800 stringStream ss; 1801 guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss)); 1802 } 1803 guarantee(free_tail != NULL, "invariant"); 1804 int l_om_free_count = Atomic::load(&self->om_free_count); 1805 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: " 1806 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count); 1807 Atomic::store(&self->om_free_count, 0); 1808 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL); 1809 om_unlock(free_list); 1810 } 1811 1812 if (free_tail != NULL) { 1813 prepend_list_to_global_free_list(free_list, free_tail, free_count); 1814 } 1815 1816 if (in_use_tail != NULL) { 1817 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count); 1818 } 1819 1820 LogStreamHandle(Debug, monitorinflation) lsh_debug; 1821 LogStreamHandle(Info, monitorinflation) lsh_info; 1822 LogStream* ls = NULL; 1823 if (log_is_enabled(Debug, monitorinflation)) { 1824 ls = &lsh_debug; 1825 } else if ((free_count != 0 || in_use_count != 0) && 1826 log_is_enabled(Info, monitorinflation)) { 1827 ls = &lsh_info; 1828 } 1829 if (ls != NULL) { 1830 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d" 1831 ", in_use_count=%d" ", om_free_provision=%d", 1832 p2i(self), free_count, in_use_count, self->om_free_provision); 1833 } 1834 } 1835 1836 static void post_monitor_inflate_event(EventJavaMonitorInflate* event, 1837 const oop obj, 1838 ObjectSynchronizer::InflateCause cause) { 1839 assert(event != NULL, "invariant"); 1840 assert(event->should_commit(), "invariant"); 1841 event->set_monitorClass(obj->klass()); 1842 event->set_address((uintptr_t)(void*)obj); 1843 
event->set_cause((u1)cause);
1844 event->commit();
1845 }
1846
1847 // Fast path code shared by multiple functions
1848 void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
1849 while (true) {
1850 markWord mark = obj->mark();
1851 if (mark.has_monitor()) {
1852 if (!omh_p->save_om_ptr(obj, mark)) {
1853 // Lost a race with async deflation so try again.
1854 assert(AsyncDeflateIdleMonitors, "sanity check");
1855 continue;
1856 }
1857 ObjectMonitor* monitor = omh_p->om_ptr();
1858 assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
1859 markWord dmw = monitor->header();
1860 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
1861 return;
1862 }
1863 inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
1864 return;
1865 }
1866 }
1867
1868 void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
1869 oop object, const InflateCause cause) {
1870 // Inflate mutates the heap ...
1871 // Relaxing assertion for bug 6320749.
1872 assert(Universe::verify_in_progress() ||
1873 !SafepointSynchronize::is_at_safepoint(), "invariant");
1874
1875 EventJavaMonitorInflate event;
1876
1877 for (;;) {
1878 const markWord mark = object->mark();
1879 assert(!mark.has_bias_pattern(), "invariant");
1880
1881 // The mark can be in one of the following states:
1882 // * Inflated - just return
1883 // * Stack-locked - coerce it to inflated
1884 // * INFLATING - busy wait for conversion to complete
1885 // * Neutral - aggressively inflate the object.
1886 // * BIASED - Illegal. We should never see this
1887
1888 // CASE: inflated
1889 if (mark.has_monitor()) {
1890 if (!omh_p->save_om_ptr(object, mark)) {
1891 // Lost a race with async deflation so try again.
1892 assert(AsyncDeflateIdleMonitors, "sanity check");
1893 continue;
1894 }
1895 ObjectMonitor* inf = omh_p->om_ptr();
1896 markWord dmw = inf->header();
1897 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1898 assert(inf->object() == object, "invariant");
1899 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1900 return;
1901 }
1902
1903 // CASE: inflation in progress - inflating over a stack-lock.
1904 // Some other thread is converting from stack-locked to inflated.
1905 // Only that thread can complete inflation -- other threads must wait.
1906 // The INFLATING value is transient.
1907 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1908 // We could always eliminate polling by parking the thread on some auxiliary list.
1909 if (mark == markWord::INFLATING()) {
1910 read_stable_mark(object);
1911 continue;
1912 }
1913
1914 // CASE: stack-locked
1915 // Could be stack-locked either by this thread or by some other thread.
1916 //
1917 // Note that we allocate the objectmonitor speculatively, _before_ attempting
1918 // to install INFLATING into the mark word. We originally installed INFLATING,
1919 // allocated the objectmonitor, and then finally STed the address of the
1920 // objectmonitor into the mark. This was correct, but artificially lengthened
1921 // the interval in which INFLATING appeared in the mark, thus increasing
1922 // the odds of inflation contention.
1923 //
1924 // We now use per-thread private objectmonitor free lists.
1925 // These lists are reprovisioned from the global free list outside the
1926 // critical INFLATING...ST interval.
A thread can transfer
1927 // multiple objectmonitors en masse from the global free list to its local free list.
1928 // This reduces coherency traffic and lock contention on the global free list.
1929 // Using such local free lists, it doesn't matter if the om_alloc() call appears
1930 // before or after the CAS(INFLATING) operation.
1931 // See the comments in om_alloc().
1932
1933 LogStreamHandle(Trace, monitorinflation) lsh;
1934
1935 if (mark.has_locker()) {
1936 ObjectMonitor* m = om_alloc(self);
1937 // Optimistically prepare the objectmonitor - anticipate successful CAS
1938 // We do this before the CAS in order to minimize the length of time
1939 // in which INFLATING appears in the mark.
1940 m->Recycle();
1941 m->_Responsible = NULL;
1942 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // Consider: maintain by type/class
1943
1944 markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1945 if (cmp != mark) {
1946 // om_release() will reset the allocation state from New to Free.
1947 om_release(self, m, true);
1948 continue; // Interference -- just retry
1949 }
1950
1951 // We've successfully installed INFLATING (0) into the mark-word.
1952 // This is the only case where 0 will appear in a mark-word.
1953 // Only the singular thread that successfully swings the mark-word
1954 // to 0 can perform (or more precisely, complete) inflation.
1955 //
1956 // Why do we CAS a 0 into the mark-word instead of just CASing the
1957 // mark-word from the stack-locked value directly to the new inflated state?
1958 // Consider what happens when a thread unlocks a stack-locked object.
1959 // It attempts to use CAS to swing the displaced header value from the
1960 // on-stack BasicLock back into the object header. Recall also that the
1961 // header value (hash code, etc) can reside in (a) the object header, or
1962 // (b) a displaced header associated with the stack-lock, or (c) a displaced
1963 // header in an ObjectMonitor. The inflate() routine must copy the header
1964 // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1965 // the while preserving the hashCode stability invariants. If the owner
1966 // decides to release the lock while the value is 0, the unlock will fail
1967 // and control will eventually pass from slow_exit() to inflate. The owner
1968 // will then spin, waiting for the 0 value to disappear. Put another way,
1969 // the 0 causes the owner to stall if the owner happens to try to
1970 // drop the lock (restoring the header from the BasicLock to the object)
1971 // while inflation is in-progress. This protocol avoids races that
1972 // would otherwise permit hashCode values to change or "flicker" for an object.
1973 // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
1974 // 0 serves as a "BUSY" inflate-in-progress indicator.
1975
1976
1977 // fetch the displaced mark from the owner's stack.
1978 // The owner can't die or unwind past the lock while our INFLATING
1979 // object is in the mark. Furthermore the owner can't complete
1980 // an unlock on the object, either.
1981 markWord dmw = mark.displaced_mark_helper();
1982 // Catch if the object's header is not neutral (not locked and
1983 // not marked is what we care about here).
1984 ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); 1985 1986 // Setup monitor fields to proper values -- prepare the monitor 1987 m->set_header(dmw); 1988 1989 // Optimization: if the mark.locker stack address is associated 1990 // with this thread we could simply set m->_owner = self. 1991 // Note that a thread can inflate an object 1992 // that it has stack-locked -- as might happen in wait() -- directly 1993 // with CAS. That is, we can avoid the xchg-NULL .... ST idiom. 1994 if (AsyncDeflateIdleMonitors) { 1995 m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker()); 1996 } else { 1997 m->set_owner_from(NULL, mark.locker()); 1998 } 1999 m->set_object(object); 2000 // TODO-FIXME: assert BasicLock->dhw != 0. 2001 2002 omh_p->set_om_ptr(m); 2003 2004 // Must preserve store ordering. The monitor state must 2005 // be stable at the time of publishing the monitor address. 2006 guarantee(object->mark() == markWord::INFLATING(), "invariant"); 2007 object->release_set_mark(markWord::encode(m)); 2008 2009 // Once ObjectMonitor is configured and the object is associated 2010 // with the ObjectMonitor, it is safe to allow async deflation: 2011 assert(m->is_new(), "freshly allocated monitor must be new"); 2012 m->set_allocation_state(ObjectMonitor::Old); 2013 2014 // Hopefully the performance counters are allocated on distinct cache lines 2015 // to avoid false sharing on MP systems ... 2016 OM_PERFDATA_OP(Inflations, inc()); 2017 if (log_is_enabled(Trace, monitorinflation)) { 2018 ResourceMark rm(self); 2019 lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark=" 2020 INTPTR_FORMAT ", type='%s'", p2i(object), 2021 object->mark().value(), object->klass()->external_name()); 2022 } 2023 if (event.should_commit()) { 2024 post_monitor_inflate_event(&event, object, cause); 2025 } 2026 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free"); 2027 return; 2028 } 2029 2030 // CASE: neutral 2031 // TODO-FIXME: for entry we currently inflate and then try to CAS _owner. 2032 // If we know we're inflating for entry it's better to inflate by swinging a 2033 // pre-locked ObjectMonitor pointer into the object header. A successful 2034 // CAS inflates the object *and* confers ownership to the inflating thread. 2035 // In the current implementation we use a 2-step mechanism where we CAS() 2036 // to inflate and then CAS() again to try to swing _owner from NULL to self. 2037 // An inflateTry() method that we could call from enter() would be useful. 2038 2039 // Catch if the object's header is not neutral (not locked and 2040 // not marked is what we care about here). 2041 ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); 2042 ObjectMonitor* m = om_alloc(self); 2043 // prepare m for installation - set monitor to initial state 2044 m->Recycle(); 2045 m->set_header(mark); 2046 // If we leave _owner == DEFLATER_MARKER here, then the simple C2 2047 // ObjectMonitor enter optimization can no longer race with async 2048 // deflation and reuse. 2049 m->set_object(object); 2050 m->_Responsible = NULL; 2051 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class 2052 2053 omh_p->set_om_ptr(m); 2054 2055 if (object->cas_set_mark(markWord::encode(m), mark) != mark) { 2056 m->set_header(markWord::zero()); 2057 m->set_object(NULL); 2058 m->Recycle(); 2059 omh_p->set_om_ptr(NULL); 2060 // om_release() will reset the allocation state from New to Free. 
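// (from_per_thread_alloc is true here because om_alloc() prepended m to
// this thread's in-use list, so om_release() must first unlink it from
// that list before it can go back on the free list.)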
2061 om_release(self, m, true); 2062 m = NULL; 2063 continue; 2064 // interference - the markword changed - just retry. 2065 // The state-transitions are one-way, so there's no chance of 2066 // live-lock -- "Inflated" is an absorbing state. 2067 } 2068 2069 // Once the ObjectMonitor is configured and object is associated 2070 // with the ObjectMonitor, it is safe to allow async deflation: 2071 assert(m->is_new(), "freshly allocated monitor must be new"); 2072 m->set_allocation_state(ObjectMonitor::Old); 2073 2074 // Hopefully the performance counters are allocated on distinct 2075 // cache lines to avoid false sharing on MP systems ... 2076 OM_PERFDATA_OP(Inflations, inc()); 2077 if (log_is_enabled(Trace, monitorinflation)) { 2078 ResourceMark rm(self); 2079 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark=" 2080 INTPTR_FORMAT ", type='%s'", p2i(object), 2081 object->mark().value(), object->klass()->external_name()); 2082 } 2083 if (event.should_commit()) { 2084 post_monitor_inflate_event(&event, object, cause); 2085 } 2086 ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free"); 2087 return; 2088 } 2089 } 2090 2091 2092 // We maintain a list of in-use monitors for each thread. 2093 // 2094 // For safepoint based deflation: 2095 // deflate_thread_local_monitors() scans a single thread's in-use list, while 2096 // deflate_idle_monitors() scans only a global list of in-use monitors which 2097 // is populated only as a thread dies (see om_flush()). 2098 // 2099 // These operations are called at all safepoints, immediately after mutators 2100 // are stopped, but before any objects have moved. Collectively they traverse 2101 // the population of in-use monitors, deflating where possible. The scavenged 2102 // monitors are returned to the global monitor free list. 2103 // 2104 // Beware that we scavenge at *every* stop-the-world point. Having a large 2105 // number of monitors in-use could negatively impact performance. We also want 2106 // to minimize the total # of monitors in circulation, as they incur a small 2107 // footprint penalty. 2108 // 2109 // Perversely, the heap size -- and thus the STW safepoint rate -- 2110 // typically drives the scavenge rate. Large heaps can mean infrequent GC, 2111 // which in turn can mean large(r) numbers of ObjectMonitors in circulation. 2112 // This is an unfortunate aspect of this design. 2113 // 2114 // For async deflation: 2115 // If a special deflation request is made, then the safepoint based 2116 // deflation mechanism is used. Otherwise, an async deflation request 2117 // is registered with the ServiceThread and it is notified. 2118 2119 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) { 2120 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2121 2122 // The per-thread in-use lists are handled in 2123 // ParallelSPCleanupThreadClosure::do_thread(). 2124 2125 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) { 2126 // Use the older mechanism for the global in-use list or if a 2127 // special deflation has been requested before the safepoint. 
2128 ObjectSynchronizer::deflate_idle_monitors(counters); 2129 return; 2130 } 2131 2132 log_debug(monitorinflation)("requesting async deflation of idle monitors."); 2133 // Request deflation of idle monitors by the ServiceThread: 2134 set_is_async_deflation_requested(true); 2135 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); 2136 ml.notify_all(); 2137 2138 if (log_is_enabled(Debug, monitorinflation)) { 2139 // exit_globals()'s call to audit_and_print_stats() is done 2140 // at the Info level and not at a safepoint. 2141 // For safepoint based deflation, audit_and_print_stats() is called 2142 // in ObjectSynchronizer::finish_deflate_idle_monitors() at the 2143 // Debug level at a safepoint. 2144 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); 2145 } 2146 } 2147 2148 // Deflate a single monitor if not in-use 2149 // Return true if deflated, false if in-use 2150 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, 2151 ObjectMonitor** free_head_p, 2152 ObjectMonitor** free_tail_p) { 2153 bool deflated; 2154 // Normal case ... The monitor is associated with obj. 2155 const markWord mark = obj->mark(); 2156 guarantee(mark == markWord::encode(mid), "should match: mark=" 2157 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(), 2158 markWord::encode(mid).value()); 2159 // Make sure that mark.monitor() and markWord::encode() agree: 2160 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT 2161 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid)); 2162 const markWord dmw = mid->header(); 2163 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); 2164 2165 if (mid->is_busy() || mid->ref_count() != 0) { 2166 // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor* 2167 // is in use so no deflation. 2168 deflated = false; 2169 } else { 2170 // Deflate the monitor if it is no longer being used 2171 // It's idle - scavenge and return to the global free list 2172 // plain old deflation ... 2173 if (log_is_enabled(Trace, monitorinflation)) { 2174 ResourceMark rm; 2175 log_trace(monitorinflation)("deflate_monitor: " 2176 "object=" INTPTR_FORMAT ", mark=" 2177 INTPTR_FORMAT ", type='%s'", p2i(obj), 2178 mark.value(), obj->klass()->external_name()); 2179 } 2180 2181 // Restore the header back to obj 2182 obj->release_set_mark(dmw); 2183 if (AsyncDeflateIdleMonitors) { 2184 // clear() expects the owner field to be NULL and we won't race 2185 // with the simple C2 ObjectMonitor enter optimization since 2186 // we're at a safepoint. DEFLATER_MARKER is the only non-NULL 2187 // value we should see here. 2188 mid->try_set_owner_from(DEFLATER_MARKER, NULL); 2189 } 2190 mid->clear(); 2191 2192 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT, 2193 p2i(mid->object())); 2194 assert(mid->is_free(), "invariant"); 2195 2196 // Move the deflated ObjectMonitor to the working free list 2197 // defined by free_head_p and free_tail_p. 2198 if (*free_head_p == NULL) *free_head_p = mid; 2199 if (*free_tail_p != NULL) { 2200 // We append to the list so the caller can use mid->_next_om 2201 // to fix the linkages in its context. 2202 ObjectMonitor* prevtail = *free_tail_p; 2203 // Should have been cleaned up by the caller: 2204 // Note: Should not have to lock prevtail here since we're at a 2205 // safepoint and ObjectMonitors on the local free list should 2206 // not be accessed in parallel. 
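// (l_next_om is declared under #ifdef ASSERT because it is consumed
// only by the assert() that follows; in product builds assert()
// compiles to nothing and the variable would otherwise be unused.)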
2207 #ifdef ASSERT 2208 ObjectMonitor* l_next_om = prevtail->next_om(); 2209 #endif 2210 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2211 prevtail->set_next_om(mid); 2212 } 2213 *free_tail_p = mid; 2214 // At this point, mid->_next_om still refers to its current 2215 // value and another ObjectMonitor's _next_om field still 2216 // refers to this ObjectMonitor. Those linkages have to be 2217 // cleaned up by the caller who has the complete context. 2218 deflated = true; 2219 } 2220 return deflated; 2221 } 2222 2223 // Deflate the specified ObjectMonitor if not in-use using a JavaThread. 2224 // Returns true if it was deflated and false otherwise. 2225 // 2226 // The async deflation protocol sets owner to DEFLATER_MARKER and 2227 // makes ref_count negative as signals to contending threads that 2228 // an async deflation is in progress. There are a number of checks 2229 // as part of the protocol to make sure that the calling thread has 2230 // not lost the race to a contending thread or to a thread that just 2231 // wants to use the ObjectMonitor*. 2232 // 2233 // The ObjectMonitor has been successfully async deflated when: 2234 // (owner == DEFLATER_MARKER && ref_count < 0) 2235 // Contending threads or ObjectMonitor* using threads that see those 2236 // values know to retry their operation. 2237 // 2238 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid, 2239 ObjectMonitor** free_head_p, 2240 ObjectMonitor** free_tail_p) { 2241 assert(AsyncDeflateIdleMonitors, "sanity check"); 2242 assert(Thread::current()->is_Java_thread(), "precondition"); 2243 // A newly allocated ObjectMonitor should not be seen here so we 2244 // avoid an endless inflate/deflate cycle. 2245 assert(mid->is_old(), "must be old: allocation_state=%d", 2246 (int) mid->allocation_state()); 2247 2248 if (mid->is_busy() || mid->ref_count() != 0) { 2249 // Easy checks are first - the ObjectMonitor is busy or ObjectMonitor* 2250 // is in use so no deflation. 2251 return false; 2252 } 2253 2254 if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) == NULL) { 2255 // ObjectMonitor is not owned by another thread. Our setting 2256 // owner to DEFLATER_MARKER forces any contending thread through 2257 // the slow path. This is just the first part of the async 2258 // deflation dance. 2259 2260 if (mid->_contentions != 0 || mid->_waiters != 0) { 2261 // Another thread has raced to enter the ObjectMonitor after 2262 // mid->is_busy() above or has already entered and waited on 2263 // it which makes it busy so no deflation. Restore owner to 2264 // NULL if it is still DEFLATER_MARKER. 2265 mid->try_set_owner_from(DEFLATER_MARKER, NULL); 2266 return false; 2267 } 2268 2269 if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) == 0) { 2270 // Make ref_count negative to force any contending threads or 2271 // ObjectMonitor* using threads to retry. This is the second 2272 // part of the async deflation dance. 2273 2274 if (mid->owner_is_DEFLATER_MARKER()) { 2275 // If owner is still DEFLATER_MARKER, then we have successfully 2276 // signaled any contending threads to retry. If it is not, then we 2277 // have lost the race to an entering thread and the ObjectMonitor 2278 // is now busy. This is the third and final part of the async 2279 // deflation dance. 2280 // Note: This owner check solves the ABA problem with ref_count 2281 // where another thread acquired the ObjectMonitor, finished 2282 // using it and restored the ref_count to zero. 
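// Condensed view of the three-part async deflation dance implemented
// above (an illustrative restatement, not separate code):
//
//   1. try_set_owner_from(NULL, DEFLATER_MARKER)  // part one: claim owner
//   2. recheck _contentions and _waiters          // bail out if busy
//   3. cmpxchg(_ref_count: 0 -> -max_jint)        // part two: poison ref_count
//   4. owner still DEFLATER_MARKER?               // part three: confirm no race
//      yes -> deflate; no -> restore ref_count and bail out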
2283
2284 // Sanity checks for the races:
2285 guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
2286 mid->_contentions);
2287 guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
2288 guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
2289 INTPTR_FORMAT, p2i(mid->_cxq));
2290 guarantee(mid->_EntryList == NULL,
2291 "must be no entering threads: EntryList=" INTPTR_FORMAT,
2292 p2i(mid->_EntryList));
2293
2294 const oop obj = (oop) mid->object();
2295 if (log_is_enabled(Trace, monitorinflation)) {
2296 ResourceMark rm;
2297 log_trace(monitorinflation)("deflate_monitor_using_JT: "
2298 "object=" INTPTR_FORMAT ", mark="
2299 INTPTR_FORMAT ", type='%s'",
2300 p2i(obj), obj->mark().value(),
2301 obj->klass()->external_name());
2302 }
2303
2304 // Install the old mark word if nobody else has already done it.
2305 mid->install_displaced_markword_in_object(obj);
2306 mid->clear_using_JT();
2307
2308 assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2309 p2i(mid->object()));
2310 assert(mid->is_free(), "must be free: allocation_state=%d",
2311 (int) mid->allocation_state());
2312
2313 // Move the deflated ObjectMonitor to the working free list
2314 // defined by free_head_p and free_tail_p. No races on this list
2315 // so no need for load_acquire() or store_release().
2316 if (*free_head_p == NULL) {
2317 // First one on the list.
2318 *free_head_p = mid;
2319 }
2320 if (*free_tail_p != NULL) {
2321 // We append to the list so the caller can use mid->_next_om
2322 // to fix the linkages in its context.
2323 ObjectMonitor* prevtail = *free_tail_p;
2324 // Should have been cleaned up by the caller:
2325 om_lock(prevtail);
2326 #ifdef ASSERT
2327 ObjectMonitor* l_next_om = unmarked_next(prevtail);
2328 #endif
2329 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2330 prevtail->set_next_om(mid); // prevtail now points to mid (and is unlocked)
2331 }
2332 *free_tail_p = mid;
2333
2334 // At this point, mid->_next_om still refers to its current
2335 // value and another ObjectMonitor's _next_om field still
2336 // refers to this ObjectMonitor. Those linkages have to be
2337 // cleaned up by the caller who has the complete context.
2338
2339 // We leave owner == DEFLATER_MARKER and ref_count < 0
2340 // to force any racing threads to retry.
2341 return true; // Success, ObjectMonitor has been deflated.
2342 }
2343
2344 // The owner was changed from DEFLATER_MARKER so we lost the
2345 // race since the ObjectMonitor is now busy.
2346
2347 // Add back max_jint to restore the ref_count field to its
2348 // proper value (which may not be what we saw above):
2349 Atomic::add(&mid->_ref_count, max_jint);
2350
2351 #ifdef ASSERT
2352 jint l_ref_count = mid->ref_count();
2353 #endif
2354 assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
2355 l_ref_count, mid->ref_count());
2356 return false;
2357 }
2358
2359 // The ref_count was no longer 0 so we lost the race since the
2360 // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
2361 // Restore owner to NULL if it is still DEFLATER_MARKER:
2362 mid->try_set_owner_from(DEFLATER_MARKER, NULL);
2363 }
2364
2365 // The owner field is no longer NULL so we lost the race since the
2366 // ObjectMonitor is now busy.
2367 return false;
2368 }
2369
2370 // Walk a given monitor list, and deflate idle monitors.
2371 // The given list could be a per-thread list or a global list.
2372 //
2373 // In the case of parallel processing of thread local monitor lists,
2374 // work is done by Threads::parallel_threads_do() which ensures that
2375 // each Java thread is processed by exactly one worker thread, and
2376 // thus avoids conflicts that would arise if worker threads
2377 // processed the same monitor lists concurrently.
2378 //
2379 // See also ParallelSPCleanupTask and
2380 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2381 // Threads::parallel_java_threads_do() in thread.cpp.
2382 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
2383 int* count_p,
2384 ObjectMonitor** free_head_p,
2385 ObjectMonitor** free_tail_p) {
2386 ObjectMonitor* cur_mid_in_use = NULL;
2387 ObjectMonitor* mid = NULL;
2388 ObjectMonitor* next = NULL;
2389 int deflated_count = 0;
2390
2391 // This list walk executes at a safepoint and does not race with any
2392 // other list walkers.
2393
2394 for (mid = Atomic::load(list_p); mid != NULL; mid = next) {
2395 next = unmarked_next(mid);
2396 oop obj = (oop) mid->object();
2397 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
2398 // Deflation succeeded and already updated free_head_p and
2399 // free_tail_p as needed. Finish the move to the local free list
2400 // by unlinking mid from the global or per-thread in-use list.
2401 if (cur_mid_in_use == NULL) {
2402 // mid is the list head so switch the list head to next:
2403 Atomic::store(list_p, next);
2404 } else {
2405 // Switch cur_mid_in_use's next field to next:
2406 cur_mid_in_use->set_next_om(next);
2407 }
2408 // At this point mid is disconnected from the in-use list.
2409 deflated_count++;
2410 Atomic::dec(count_p);
2411 // mid is current tail in the free_head_p list so NULL terminate it:
2412 mid->set_next_om(NULL);
2413 } else {
2414 cur_mid_in_use = mid;
2415 }
2416 }
2417 return deflated_count;
2418 }
2419
2420 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
2421 // a JavaThread. Returns the number of deflated ObjectMonitors. The given
2422 // list could be a per-thread in-use list or the global in-use list.
2423 // If a safepoint has started, then we save state via saved_mid_in_use_p
2424 // and return to the caller to honor the safepoint.
2425 //
2426 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
2427 int* count_p,
2428 ObjectMonitor** free_head_p,
2429 ObjectMonitor** free_tail_p,
2430 ObjectMonitor** saved_mid_in_use_p) {
2431 assert(AsyncDeflateIdleMonitors, "sanity check");
2432 JavaThread* self = JavaThread::current();
2433
2434 ObjectMonitor* cur_mid_in_use = NULL;
2435 ObjectMonitor* mid = NULL;
2436 ObjectMonitor* next = NULL;
2437 ObjectMonitor* next_next = NULL;
2438 int deflated_count = 0;
2439 NoSafepointVerifier nsv;
2440
2441 // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
2442 // protocol because om_release() can do list deletions in parallel;
2443 // this also prevents races with a list walker thread. We also
2444 // lock-next-next-as-we-go to prevent an om_flush() that is behind
2445 // this thread from passing us.
2446 if (*saved_mid_in_use_p == NULL) {
2447 // No saved state so start at the beginning.
2448 // Lock the list head so we can possibly deflate it:
2449 if ((mid = get_list_head_locked(list_p)) == NULL) {
2450 return 0; // The list is empty so nothing to deflate.
2451 }
2452 next = unmarked_next(mid);
2453 } else {
2454 // We're restarting after a safepoint so restore the necessary state
2455 // before we resume.
2456 cur_mid_in_use = *saved_mid_in_use_p;
2457 // Lock cur_mid_in_use so we can possibly update its
2458 // next field to extract a deflated ObjectMonitor.
2459 om_lock(cur_mid_in_use);
2460 mid = unmarked_next(cur_mid_in_use);
2461 if (mid == NULL) {
2462 om_unlock(cur_mid_in_use);
2463 *saved_mid_in_use_p = NULL;
2464 return 0; // The remainder is empty so nothing more to deflate.
2465 }
2466 // Lock mid so we can possibly deflate it:
2467 om_lock(mid);
2468 next = unmarked_next(mid);
2469 }
2470
2471 while (true) {
2472 // The current mid's next field is marked at this point. If we have
2473 // a cur_mid_in_use, then its next field is also marked at this point.
2474
2475 if (next != NULL) {
2476 // We lock next so that an om_flush() thread that is behind us
2477 // cannot pass us when we unlock the current mid.
2478 om_lock(next);
2479 next_next = unmarked_next(next);
2480 }
2481
2482 // Only try to deflate if there is an associated Java object and if
2483 // mid is old (is not newly allocated and is not newly freed).
2484 if (mid->object() != NULL && mid->is_old() &&
2485 deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2486 // Deflation succeeded and already updated free_head_p and
2487 // free_tail_p as needed. Finish the move to the local free list
2488 // by unlinking mid from the global or per-thread in-use list.
2489 if (cur_mid_in_use == NULL) {
2490 // mid is the list head and it is locked. Switch the list head
2491 // to next which is also locked (if not NULL) and also leave
2492 // mid locked:
2493 Atomic::store(list_p, next);
2494 } else {
2495 ObjectMonitor* locked_next = mark_om_ptr(next);
2496 // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2497 // next field to locked_next and also leave mid locked:
2498 cur_mid_in_use->set_next_om(locked_next);
2499 }
2500 // At this point mid is disconnected from the in-use list so
2501 // its lock no longer has any effect on the in-use list.
2502 deflated_count++;
2503 Atomic::dec(count_p);
2504 // mid is current tail in the free_head_p list so NULL terminate it
2505 // (which also unlocks it):
2506 mid->set_next_om(NULL);
2507
2508 // All the list management is done so move on to the next one:
2509 mid = next; // mid keeps non-NULL next's locked state
2510 next = next_next;
2511 } else {
2512 // mid is considered in-use if it does not have an associated
2513 // Java object or mid is not old or deflation did not succeed.
2514 // A mid->is_new() node can be seen here when it is freshly
2515 // returned by om_alloc() (and skips the deflation code path).
2516 // A mid->is_old() node can be seen here when deflation failed.
2517 // A mid->is_free() node can be seen here when a fresh node from
2518 // om_alloc() is released by om_release() due to losing the race
2519 // in inflate().
2520
2521 // All the list management is done so move on to the next one:
2522 if (cur_mid_in_use != NULL) {
2523 om_unlock(cur_mid_in_use);
2524 }
2525 // The next cur_mid_in_use keeps mid's lock state so
2526 // that it is stable for a possible next field change. It
2527 // cannot be modified by om_release() while it is locked.
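// Hand-over-hand locking in this walk (illustrative): at most three
// nodes carry the list-lock mark at any time, and marks are released
// from behind as the walk advances:
//
//   [cur_mid_in_use]* -> [mid]* -> [next]* -> [next_next]
//
// where '*' denotes a locked next field; cur_mid_in_use is unlocked
// once the walk moves past it.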
2528 cur_mid_in_use = mid; 2529 mid = next; // mid keeps non-NULL next's locked state 2530 next = next_next; 2531 2532 if (SafepointMechanism::should_block(self) && 2533 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) { 2534 // If a safepoint has started and cur_mid_in_use is not the list 2535 // head and is old, then it is safe to use as saved state. Return 2536 // to the caller before blocking. 2537 *saved_mid_in_use_p = cur_mid_in_use; 2538 om_unlock(cur_mid_in_use); 2539 if (mid != NULL) { 2540 om_unlock(mid); 2541 } 2542 return deflated_count; 2543 } 2544 } 2545 if (mid == NULL) { 2546 if (cur_mid_in_use != NULL) { 2547 om_unlock(cur_mid_in_use); 2548 } 2549 break; // Reached end of the list so nothing more to deflate. 2550 } 2551 2552 // The current mid's next field is locked at this point. If we have 2553 // a cur_mid_in_use, then it is also locked at this point. 2554 } 2555 // We finished the list without a safepoint starting so there's 2556 // no need to save state. 2557 *saved_mid_in_use_p = NULL; 2558 return deflated_count; 2559 } 2560 2561 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { 2562 counters->n_in_use = 0; // currently associated with objects 2563 counters->n_in_circulation = 0; // extant 2564 counters->n_scavenged = 0; // reclaimed (global and per-thread) 2565 counters->per_thread_scavenged = 0; // per-thread scavenge total 2566 counters->per_thread_times = 0.0; // per-thread scavenge times 2567 } 2568 2569 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { 2570 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2571 2572 if (AsyncDeflateIdleMonitors) { 2573 // Nothing to do when global idle ObjectMonitors are deflated using 2574 // a JavaThread unless a special deflation has been requested. 2575 if (!is_special_deflation_requested()) { 2576 return; 2577 } 2578 } 2579 2580 bool deflated = false; 2581 2582 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors 2583 ObjectMonitor* free_tail_p = NULL; 2584 elapsedTimer timer; 2585 2586 if (log_is_enabled(Info, monitorinflation)) { 2587 timer.start(); 2588 } 2589 2590 // Note: the thread-local monitors lists get deflated in 2591 // a separate pass. See deflate_thread_local_monitors(). 2592 2593 // For moribund threads, scan om_list_globals._in_use_list 2594 int deflated_count = 0; 2595 if (Atomic::load(&om_list_globals._in_use_list) != NULL) { 2596 // Update n_in_circulation before om_list_globals._in_use_count is 2597 // updated by deflation. 2598 Atomic::add(&counters->n_in_circulation, 2599 Atomic::load(&om_list_globals._in_use_count)); 2600 2601 deflated_count = deflate_monitor_list(&om_list_globals._in_use_list, 2602 &om_list_globals._in_use_count, 2603 &free_head_p, &free_tail_p); 2604 Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count)); 2605 } 2606 2607 if (free_head_p != NULL) { 2608 // Move the deflated ObjectMonitors back to the global free list. 
2609 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); 2610 #ifdef ASSERT 2611 ObjectMonitor* l_next_om = free_tail_p->next_om(); 2612 #endif 2613 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2614 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); 2615 Atomic::add(&counters->n_scavenged, deflated_count); 2616 } 2617 timer.stop(); 2618 2619 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2620 LogStreamHandle(Info, monitorinflation) lsh_info; 2621 LogStream* ls = NULL; 2622 if (log_is_enabled(Debug, monitorinflation)) { 2623 ls = &lsh_debug; 2624 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2625 ls = &lsh_info; 2626 } 2627 if (ls != NULL) { 2628 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 2629 } 2630 } 2631 2632 class HandshakeForDeflation : public HandshakeClosure { 2633 public: 2634 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {} 2635 2636 void do_thread(Thread* thread) { 2637 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread=" 2638 INTPTR_FORMAT, p2i(thread)); 2639 } 2640 }; 2641 2642 void ObjectSynchronizer::deflate_idle_monitors_using_JT() { 2643 assert(AsyncDeflateIdleMonitors, "sanity check"); 2644 2645 // Deflate any global idle monitors. 2646 deflate_global_idle_monitors_using_JT(); 2647 2648 int count = 0; 2649 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 2650 if (Atomic::load(&jt->om_in_use_count) > 0 && !jt->is_exiting()) { 2651 // This JavaThread is using ObjectMonitors so deflate any that 2652 // are idle unless this JavaThread is exiting; do not race with 2653 // ObjectSynchronizer::om_flush(). 2654 deflate_per_thread_idle_monitors_using_JT(jt); 2655 count++; 2656 } 2657 } 2658 if (count > 0) { 2659 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count); 2660 } 2661 2662 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, " 2663 "global_free_count=%d, global_wait_count=%d", 2664 Atomic::load(&om_list_globals._population), 2665 Atomic::load(&om_list_globals._in_use_count), 2666 Atomic::load(&om_list_globals._free_count), 2667 Atomic::load(&om_list_globals._wait_count)); 2668 2669 // The ServiceThread's async deflation request has been processed. 2670 set_is_async_deflation_requested(false); 2671 2672 if (HandshakeAfterDeflateIdleMonitors && 2673 Atomic::load(&om_list_globals._wait_count) > 0) { 2674 // There are deflated ObjectMonitors waiting for a handshake 2675 // (or a safepoint) for safety. 2676 2677 ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list); 2678 ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL"); 2679 int count = Atomic::load(&om_list_globals._wait_count); 2680 Atomic::store(&om_list_globals._wait_count, 0); 2681 Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL); 2682 2683 // Find the tail for prepend_list_to_common(). No need to mark 2684 // ObjectMonitors for this list walk since only the deflater 2685 // thread manages the wait list. 
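// (Below, after the tail is found, the handshake is the safety step:
// once every JavaThread has executed the handshake -- or a safepoint on
// platforms without handshake support -- no thread can still hold a
// stale ObjectMonitor* to one of these deflated monitors, so they can
// safely be moved to the global free list for reuse.)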
2686 int l_count = 0; 2687 ObjectMonitor* tail = NULL; 2688 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) { 2689 tail = n; 2690 l_count++; 2691 } 2692 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count); 2693 2694 // Will execute a safepoint if !ThreadLocalHandshakes: 2695 HandshakeForDeflation hfd_hc; 2696 Handshake::execute(&hfd_hc); 2697 2698 prepend_list_to_common(list, tail, count, &om_list_globals._free_list, 2699 &om_list_globals._free_count); 2700 2701 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count); 2702 } 2703 } 2704 2705 // Deflate global idle ObjectMonitors using a JavaThread. 2706 // 2707 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() { 2708 assert(AsyncDeflateIdleMonitors, "sanity check"); 2709 assert(Thread::current()->is_Java_thread(), "precondition"); 2710 JavaThread* self = JavaThread::current(); 2711 2712 deflate_common_idle_monitors_using_JT(true /* is_global */, self); 2713 } 2714 2715 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread. 2716 // 2717 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) { 2718 assert(AsyncDeflateIdleMonitors, "sanity check"); 2719 assert(Thread::current()->is_Java_thread(), "precondition"); 2720 2721 deflate_common_idle_monitors_using_JT(false /* !is_global */, target); 2722 } 2723 2724 // Deflate global or per-thread idle ObjectMonitors using a JavaThread. 2725 // 2726 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) { 2727 JavaThread* self = JavaThread::current(); 2728 2729 int deflated_count = 0; 2730 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors 2731 ObjectMonitor* free_tail_p = NULL; 2732 ObjectMonitor* saved_mid_in_use_p = NULL; 2733 elapsedTimer timer; 2734 2735 if (log_is_enabled(Info, monitorinflation)) { 2736 timer.start(); 2737 } 2738 2739 if (is_global) { 2740 OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count))); 2741 } else { 2742 OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count))); 2743 } 2744 2745 do { 2746 int local_deflated_count; 2747 if (is_global) { 2748 local_deflated_count = 2749 deflate_monitor_list_using_JT(&om_list_globals._in_use_list, 2750 &om_list_globals._in_use_count, 2751 &free_head_p, &free_tail_p, 2752 &saved_mid_in_use_p); 2753 } else { 2754 local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p); 2755 } 2756 deflated_count += local_deflated_count; 2757 2758 if (free_head_p != NULL) { 2759 // Move the deflated ObjectMonitors to the global free list. 2760 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count); 2761 // Note: The target thread can be doing an om_alloc() that 2762 // is trying to prepend an ObjectMonitor on its in-use list 2763 // at the same time that we have deflated the current in-use 2764 // list head and put it on the local free list. prepend_to_common() 2765 // will detect the race and retry which avoids list corruption, 2766 // but the next field in free_tail_p can flicker to marked 2767 // and then unmarked while prepend_to_common() is sorting it 2768 // all out. 
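// (That possible flicker is why the assert below reads the next field
// through unmarked_next() rather than next_om(): it has to ignore a
// transient mark bit set by the racing thread.)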
#ifdef ASSERT
      ObjectMonitor* l_next_om = unmarked_next(free_tail_p);
#endif
      assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));

      if (HandshakeAfterDeflateIdleMonitors) {
        prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
      } else {
        prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count);
      }

      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
    }

    if (saved_mid_in_use_p != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      timer.stop();
      {
        if (is_global) {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        } else {
          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
        }
        assert(SafepointMechanism::should_block(self), "sanity check");
        ThreadBlockInVM blocker(self);
      }
      // Prepare for another loop after the safepoint.
      free_head_p = NULL;
      free_tail_p = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
    }
  } while (saved_mid_in_use_p != NULL);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    if (is_global) {
      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
    } else {
      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
    }
  }
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);

  bool needs_special_deflation = is_special_deflation_requested();
  if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
    // Nothing to do when idle ObjectMonitors are deflated using
    // a JavaThread unless a special deflation has been requested.
    return;
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level and not at a safepoint.
    // For async deflation, audit_and_print_stats() is called in
    // ObjectSynchronizer::do_safepoint_work() at the Debug level
    // at a safepoint.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
                               "global_free_count=%d, global_wait_count=%d",
                               Atomic::load(&om_list_globals._population),
                               Atomic::load(&om_list_globals._in_use_count),
                               Atomic::load(&om_list_globals._free_count),
                               Atomic::load(&om_list_globals._wait_count));
  }

  Atomic::store(&_forceMonitorScavenge, 0);  // Reset

  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));

  GVars.stw_random = os::random();
  GVars.stw_cycle++;

  if (needs_special_deflation) {
    set_is_special_deflation_requested(false);  // special deflation is done
  }
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
    // Nothing to do if a special deflation has NOT been requested.
    return;
  }

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Update n_in_circulation before om_in_use_count is updated by deflation.
  Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));

  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
  Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
#ifdef ASSERT
    ObjectMonitor* l_next_om = free_tail_p->next_om();
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
    Atomic::add(&counters->per_thread_scavenged, deflated_count);
  }

  timer.stop();
  counters->per_thread_times += timer.seconds();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through the monitor cache and attempt to release the thread's
// monitors. Gives up on a particular monitor if an exception occurs, but
// continues the overall iteration, swallowing the exception.
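// Note: complete_exit() (unlike a plain monitor exit) releases the
// monitor even if it was entered recursively, returning the saved
// recursion count, which is exactly what a detaching thread needs.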
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time-consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  ObjectSynchronizer::monitors_iterate(&rjmc);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// This function can be called at a safepoint or it can be called when
// we are trying to exit the VM. When we are trying to exit the VM, the
// list walker functions can run in parallel with the other list
// operations so spin-locking is used for safety.
//
// Calls to this function can be added in various places as a debugging
// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
// details logged at the Info level and 'false' for the 'on_exit'
// parameter to have in-use monitor details logged at the Trace level.
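//
// For example (a usage note, not VM code), the in-use monitor details
// can be surfaced with unified logging, e.g.:
//   java -Xlog:monitorinflation=trace ...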
//
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check om_list_globals._population:
  if (Atomic::load(&om_list_globals._population) == chk_om_population) {
    ls->print_cr("global_population=%d equals chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // log_monitor_list_counts() to return a value that doesn't match
    // om_list_globals._population. So far a higher value has been
    // seen in testing so something is being double-counted by
    // log_monitor_list_counts().
    ls->print_cr("WARNING: global_population=%d is not equal to "
                 "chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  }

  // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check om_list_globals._free_list and om_list_globals._free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  if (HandshakeAfterDeflateIdleMonitors) {
    // Check om_list_globals._wait_list and om_list_globals._wait_count:
    chk_global_wait_list_and_count(ls, &error_cnt);
  }

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
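// A free-list entry must be idle: not busy, with its _header and _object
// fields cleared. The one relaxation (see below): with
// AsyncDeflateIdleMonitors, a non-zero _header is tolerated on a *global*
// free-list entry; only per-thread free entries must always have a
// cleared header.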
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    } else if (!AsyncDeflateIdleMonitors) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Lock the next ObjectMonitor for traversal and unlock the current
// ObjectMonitor. Returns the next ObjectMonitor if there is one.
// Otherwise returns NULL (after unlocking the current ObjectMonitor).
// This function is used by the various list walker functions to
// safely walk a list without allowing an ObjectMonitor to be moved
// to another list in the middle of a walk.
static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
  assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
  ObjectMonitor* next = unmarked_next(cur);
  if (next == NULL) {  // Reached the end of the list.
    om_unlock(cur);
    return NULL;
  }
  om_lock(next);   // Lock next before unlocking current to keep
  om_unlock(cur);  // from being by-passed by another thread.
  return next;
}

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._free_list)) != NULL) {
    // Marked the global free list head so process the list.
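    // The walk below uses hand-over-hand locking via
    // lock_next_for_traversal(): the successor is locked before the
    // current entry is released, so a racing thread cannot move an
    // entry to another list out from under the walker.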
    while (true) {
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  if (l_free_count == chk_om_free_count) {
    out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
                  l_free_count, chk_om_free_count);
  } else {
    // With fine-grained locks on om_list_globals._free_list, it
    // is possible for an ObjectMonitor to be prepended to
    // om_list_globals._free_list after we started calculating
    // chk_om_free_count so om_list_globals._free_count may not
    // match anymore.
    out->print_cr("WARNING: global_free_count=%d is not equal to "
                  "chk_om_free_count=%d", l_free_count, chk_om_free_count);
  }
}

// Check the global wait list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_wait_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._wait_list)) != NULL) {
    // Marked the global wait list head so process the list.
    while (true) {
      // Rules for om_list_globals._wait_list are the same as for
      // om_list_globals._free_list:
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_wait_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  if (Atomic::load(&om_list_globals._wait_count) == chk_om_wait_count) {
    out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
  } else {
    out->print_cr("ERROR: global_wait_count=%d is not equal to "
                  "chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
    // Marked the global in-use list head so process the list.
    while (true) {
      chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  if (l_in_use_count == chk_om_in_use_count) {
    out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  } else {
    // With fine-grained locks on the monitor lists, it is possible for
    // an exiting JavaThread to put its in-use ObjectMonitors on the
    // global in-use list after chk_om_in_use_count is calculated above.
    out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  }
}

// Check an in-use monitor entry; log any errors.
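// An in-use entry must have non-NULL _header and _object fields, and the
// object's mark word must be in the inflated (monitor) state and point
// back at this exact ObjectMonitor; anything else means the
// object<->monitor association is broken.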
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
    // Marked the per-thread free list head so process the list.
    while (true) {
      chk_free_entry(jt, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_free_count = Atomic::load(&jt->om_free_count);
  if (l_om_free_count == chk_om_free_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
                  "chk_om_free_count=%d", p2i(jt), l_om_free_count, chk_om_free_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
                  "equal to chk_om_free_count=%d", p2i(jt), l_om_free_count,
                  chk_om_free_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
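// Per-thread count mismatches (free list above, in-use list below) are
// reported as hard errors that bump *error_cnt_p, unlike the global
// in-use/free counts, which can legitimately lag behind a concurrent
// list walk and are therefore only warned about.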
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
    // Marked the per-thread in-use list head so process the list.
    while (true) {
      chk_in_use_entry(jt, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
  if (l_om_in_use_count == chk_om_in_use_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                  "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                  "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
  stringStream ss;
  if (Atomic::load(&om_list_globals._in_use_count) > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %7s %18s %18s",
                  "monitor", "BHL", "ref_cnt", "object", "object type");
    out->print_cr("================== === ======= ================== ==================");
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
      // Marked the global in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s", p2i(cur),
                   cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
                   (int)cur->ref_count(), p2i(obj), obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %7s %18s %18s",
                "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
  out->print_cr("================== ================== === ======= ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
      // Marked the per-thread in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT
                   " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
                   mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
                   p2i(obj), obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int pop_count = 0;
  out->print_cr("%18s %10s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Wait", "Total");
  out->print_cr("================== ========== ========== ========== ==========");
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  int l_wait_count = Atomic::load(&om_list_globals._wait_count);
  out->print_cr("%18s %10d %10d %10d %10d", "", l_in_use_count,
                l_free_count, l_wait_count,
                Atomic::load(&om_list_globals._population));
  pop_count += l_in_use_count + l_free_count;
  if (HandshakeAfterDeflateIdleMonitors) {
    pop_count += l_wait_count;
  }

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
    int l_om_free_count = Atomic::load(&jt->om_free_count);
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  l_om_in_use_count, l_om_free_count, jt->om_free_provision);
    pop_count += l_om_in_use_count + l_om_free_count;
  }
  return pop_count;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
  return 0;
}

#endif