/*
 * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
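
// The macros below capture the probe payload (Java thread id, class name
// bytes and UTF-8 length) once via DTRACE_MONITOR_PROBE_COMMON and fire the
// matching HOTSPOT_MONITOR_* USDT probe only when DTraceMonitorProbes is
// enabled; with dtrace compiled out they collapse to empty blocks, so call
// sites can use them unconditionally.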

#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else // ndef DTRACE_ENABLED

// Keep the no-op variants' parameter lists in sync with the enabled
// variants above so call sites read the same either way.
#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis) {;}
#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)       {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

struct ObjectMonitorListGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared list related variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.

  // Global ObjectMonitor free list. Newly allocated and deflated
  // ObjectMonitors are prepended here.
  ObjectMonitor* _free_list;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* _in_use_list;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor wait list. Deflated ObjectMonitors wait on
  // this list until after a handshake or a safepoint for platforms
  // that don't support handshakes. After the handshake or safepoint,
  // the deflated ObjectMonitors are prepended to free_list.
  ObjectMonitor* _wait_list;
  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  int _free_count;    // # on free_list
  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));

  int _in_use_count;  // # on in_use_list
  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));

  int _population;    // # Extant -- in circulation
  DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));

  int _wait_count;    // # on wait_list
  DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
};
static ObjectMonitorListGlobals om_list_globals;
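
// Illustration of the padding idiom above (a sketch, assuming a 64-byte
// OM_CACHE_LINE_SIZE and 8-byte pointers): each hot field is followed by
// enough padding to be the sole occupant of its cache line, conceptually:
//
//   ObjectMonitor* _free_list;                                // 8 bytes
//   char _pad1[OM_CACHE_LINE_SIZE - sizeof(ObjectMonitor*)];  // 56 bytes
//
// so a store to _free_list on one core never invalidates the cache line
// holding _in_use_list on another core (no false sharing).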
#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Spin-lock functions

// ObjectMonitors are not lockable outside of this file. We use spin-locks
// implemented using a bit in the _next_om field instead of the heavier
// weight locking mechanisms for faster list management.

#define OM_LOCK_BIT 0x1

// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
static bool is_locked(ObjectMonitor* om) {
  return ((intptr_t)om->next_om_acquire() & OM_LOCK_BIT) == OM_LOCK_BIT;
}

// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}

// Return the unmarked next field in an ObjectMonitor. Note: the next
// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om->next_om() & ~OM_LOCK_BIT);
}

// Try to lock an ObjectMonitor. Returns true if locking was successful.
// Otherwise returns false.
static bool try_om_lock(ObjectMonitor* om) {
  // Get current next field without any OM_LOCK_BIT value.
  ObjectMonitor* next = unmarked_next(om);
  if (om->try_set_next_om(next, mark_om_ptr(next)) != next) {
    return false;  // Cannot lock the ObjectMonitor.
  }
  return true;
}

// Lock an ObjectMonitor.
static void om_lock(ObjectMonitor* om) {
  while (true) {
    if (try_om_lock(om)) {
      return;
    }
  }
}

// Unlock an ObjectMonitor.
static void om_unlock(ObjectMonitor* om) {
  ObjectMonitor* next = om->next_om();
  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
  om->release_set_next_om(next);
}

// Get the list head after locking it. Returns the list head or NULL
// if the list is empty.
static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
  while (true) {
    // Acquire semantics not needed on this list load since we're
    // checking for NULL here or following up with a cmpxchg() via
    // try_om_lock() below and we retry on cmpxchg() failure.
    ObjectMonitor* mid = Atomic::load(list_p);
    if (mid == NULL) {
      return NULL;  // The list is empty.
    }
    if (try_om_lock(mid)) {
      // Acquire semantics not needed on this list load since memory is
      // already consistent due to the cmpxchg() via try_om_lock() above.
      if (Atomic::load(list_p) != mid) {
        // The list head changed before we could lock it so we have to retry.
        om_unlock(mid);
        continue;
      }
      return mid;
    }
  }
}
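
// A compact illustration of the encoding used above (a sketch): with
// OM_LOCK_BIT == 0x1 and ObjectMonitor* values always at least 2-byte
// aligned, the low bit of _next_om is free to act as the lock:
//
//   _next_om == 0x...c8  ->  unlocked, next ObjectMonitor is 0x...c8
//   _next_om == 0x...c9  ->  locked,   next ObjectMonitor is 0x...c8
//
// try_om_lock() cmpxchg()es the unlocked value for the locked value, so
// at most one thread holds a given ObjectMonitor's list lock at a time.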
#undef OM_LOCK_BIT


// =====================> List Management functions

// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p.
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                   int count, ObjectMonitor** list_p,
                                   int* count_p) {
  while (true) {
    // Acquire semantics not needed on this list load since we're
    // following up with a cmpxchg() via try_om_lock() below and we
    // retry on cmpxchg() failure.
    ObjectMonitor* cur = Atomic::load(list_p);
    // Prepend list to *list_p.
    if (!try_om_lock(tail)) {
      // Failed to lock tail due to a list walker so try it all again.
      continue;
    }
    // Release semantics not needed on this "unlock" since memory is
    // already consistent due to the cmpxchg() via try_om_lock() above.
    tail->set_next_om(cur);  // tail now points to cur (and unlocks tail)
    if (cur == NULL) {
      // No potential race with takers or other prependers since
      // *list_p is empty.
      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
        // Successfully switched *list_p to the list value.
        Atomic::add(count_p, count);
        break;
      }
      // Implied else: try it all again
    } else {
      if (!try_om_lock(cur)) {
        continue;  // failed to lock cur so try it all again
      }
      // We locked cur so try to switch *list_p to the list value.
      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
        // The list head has changed so unlock cur and try again:
        om_unlock(cur);
        continue;
      }
      Atomic::add(count_p, count);
      om_unlock(cur);
      break;
    }
  }
}

// Prepend a newly allocated block of ObjectMonitors to g_block_list and
// om_list_globals._free_list. Also updates om_list_globals._population
// and om_list_globals._free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
  // First we handle g_block_list:
  while (true) {
    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
    // Prepend new_blk to g_block_list. The first ObjectMonitor in
    // a block is reserved for use as linkage to the next block.
    new_blk[0].set_next_om(cur);
    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
      // Successfully switched g_block_list to the new_blk value.
      Atomic::add(&om_list_globals._population, _BLOCKSIZE - 1);
      break;
    }
    // Implied else: try it all again
  }

  // Second we handle om_list_globals._free_list:
  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
                         &om_list_globals._free_list, &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._free_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._free_count.
static void prepend_list_to_global_free_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._free_list,
                         &om_list_globals._free_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._wait_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._wait_count.
static void prepend_list_to_global_wait_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._wait_list,
                         &om_list_globals._wait_count);
}

// Prepend a list of ObjectMonitors to om_list_globals._in_use_list.
// 'tail' is the last ObjectMonitor in the list and there are 'count'
// on the list. Also updates om_list_globals._in_use_count.
static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
                                               ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &om_list_globals._in_use_list,
                         &om_list_globals._in_use_count);
}
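
// The prepend protocol used by prepend_list_to_common() boils down to
// three steps (a summary of the code above, not new policy):
//   1. lock the incoming tail so a concurrent list walker cannot walk
//      past the splice point into a half-linked sublist,
//   2. point tail at the current head (which also unlocks tail), and
//   3. cmpxchg() the list head to the new sublist, locking the old head
//      first when the list is non-empty so the swap and the count update
//      appear atomic to walkers.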

// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter.
static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
                              int* count_p) {
  while (true) {
    om_lock(m);  // Lock m so we can safely update its next field.
    ObjectMonitor* cur = NULL;
    // Lock the list head to guard against races with a list walker
    // or async deflater thread (which only races in om_in_use_list):
    if ((cur = get_list_head_locked(list_p)) != NULL) {
      // List head is now locked so we can safely switch it. Release
      // semantics not needed on this "unlock" since memory is already
      // consistent due to the cmpxchg() via get_list_head_locked() above.
      m->set_next_om(cur);  // m now points to cur (and unlocks m)
      OrderAccess::storestore();  // Make sure set_next_om() is seen first.
      Atomic::store(list_p, m);  // Switch list head to unlocked m.
      om_unlock(cur);
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
    // Release semantics not needed on this "unlock" since memory
    // is already consistent.
    m->set_next_om(cur);  // m now points to NULL (and unlocks m)
    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
      // List head is now unlocked m.
      break;
    }
    // Implied else: try it all again
  }
  Atomic::inc(count_p);
}

// Prepend an ObjectMonitor to a per-thread om_free_list.
// Also updates the per-thread om_free_count.
static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
}

// Prepend an ObjectMonitor to a per-thread om_in_use_list.
// Also updates the per-thread om_in_use_count.
static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}

// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
                                                int* count_p) {
  ObjectMonitor* take = NULL;
  // Lock the list head to guard against races with a list walker
  // or async deflater thread (which only races in om_list_globals._free_list):
  if ((take = get_list_head_locked(list_p)) == NULL) {
    return NULL;  // None are available.
  }
  ObjectMonitor* next = unmarked_next(take);
  // Switch locked list head to next (which unlocks the list head, but
  // leaves take locked). Release semantics not needed on this "unlock"
  // since memory is already consistent due to the cmpxchg() via
  // get_list_head_locked() above.
  Atomic::store(list_p, next);
  Atomic::dec(count_p);
  // Unlock take, but leave the next value for any lagging list
  // walkers. It will get cleaned up when take is prepended to
  // the in-use list:
  om_unlock(take);
  return take;
}

// Take an ObjectMonitor from the start of the om_list_globals._free_list.
// Also updates om_list_globals._free_count. Returns NULL if none are
// available.
static ObjectMonitor* take_from_start_of_global_free_list() {
  return take_from_start_of_common(&om_list_globals._free_list,
                                   &om_list_globals._free_count);
}

// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
}
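
// Typical pairing of these helpers (a sketch of the flows later in this
// file): om_alloc() refills a thread's free list with
// take_from_start_of_global_free_list() + om_release(), and moves a
// monitor to the per-thread in-use list with prepend_to_om_in_use_list();
// deflation later drains in-use lists back toward the global free list.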


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}
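
// Note on the fast-path notify above: ObjectMonitor::INotify() moves one
// waiter from the WaitSet to the EntryList (or cxq) without unparking it,
// so the notified thread does not contend for the monitor until the
// subsequent monitorexit. That is what keeps quick_notify() free of
// safepoints and indefinite blocking.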


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;       // Need to throw NPE

  const markWord mark = obj->mark();

  if (mark.has_monitor()) {
    ObjectMonitor* const m = mark.monitor();
    if (AsyncDeflateIdleMonitors) {
      // An async deflation can race us before we manage to make the
      // ObjectMonitor busy by setting the owner below. If we detect
      // that race we just bail out to the slow-path here.
      if (m->object() == NULL) {
        return false;
      }
    } else {
      assert(m->object() == obj, "invariant");
    }
    Thread* const owner = (Thread *) m->_owner;

    // Lock contention and Transactional Lock Elision (TLE) diagnostics
    // and observability
    // Case: light contention possibly amenable to TLE
    // Case: TLE inimical operations such as nested/recursive synchronization

    if (owner == self) {
      m->_recursions++;
      return true;
    }

    // This Java Monitor is inflated so obj's header will never be
    // displaced to this thread's BasicLock. Make the displaced header
    // non-NULL so this BasicLock is not seen as recursive nor as
    // being locked. We do this unconditionally so that this thread's
    // BasicLock cannot be mis-interpreted by any stack walkers. For
    // performance reasons, stack walkers generally first check for
    // Biased Locking in the object's header, the second check is for
    // stack-locking in the object's header, the third check is for
    // recursive stack-locking in the displaced header in the BasicLock,
    // and last are the inflated Java Monitor (ObjectMonitor) checks.
    lock->set_displaced_header(markWord::unused_mark());

    if (owner == NULL && m->try_set_owner_from(NULL, self) == NULL) {
      assert(m->_recursions == 0, "invariant");
      return true;
    }
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
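
// For reference, the mark word lock states tested throughout this file
// (a condensed sketch; markWord.hpp is authoritative):
//
//   [ptr             | 00]  stack-locked: ptr points to the BasicLock
//                           (displaced header) on the owner's stack
//   [header      | 0 | 01]  neutral/unlocked: regular header, may hold hash
//   [JavaThread* | 1 | 01]  biased toward the given thread
//   [ptr             | 10]  inflated: ptr points to the ObjectMonitor
//   [ptr             | 11]  marked: used by GC
//
// An all-zero mark is markWord::INFLATING(), transiently installed while
// a stack-lock is being inflated; see read_stable_mark() below.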

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_monitor_enter);
    if (monitor->enter(THREAD)) {
      return;
    }
  }
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, object, inflate_cause_vm_internal);
  monitor->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = monitor->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  // An async deflation can race after the inflate() call and before
  // reenter() -> enter() can make the ObjectMonitor busy. reenter() ->
  // enter() returns false if we have lost the race to async deflation
  // and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_vm_internal);
    if (monitor->reenter(recursions, THREAD)) {
      return;
    }
  }
}

// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  // An async deflation can race after the inflate() call and before
  // enter() can make the ObjectMonitor busy. enter() returns false if
  // we have lost the race to async deflation and we simply try again.
  while (true) {
    ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_jni_enter);
    if (monitor->enter(THREAD)) {
      break;
    }
  }
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped inside exit() and the ObjectMonitor* must be !is_busy().
  ObjectMonitor* monitor = inflate(THREAD, obj, inflate_cause_jni_exit);
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  // The ObjectMonitor* can't be async deflated because the _waiters
  // field is incremented before ownership is dropped and decremented
  // after ownership is regained.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_wait);
  monitor->wait(millis, false, THREAD);
}
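
// These entry points back the familiar Java-level idiom (an illustrative
// mapping, not new behavior):
//
//   synchronized (obj) {           // ObjectSynchronizer::enter()
//     while (!condition) {
//       obj.wait();                // ObjectSynchronizer::wait()
//     }
//     obj.notifyAll();             // ObjectSynchronizer::notifyall()
//   }                              // ObjectSynchronizer::exit()
//
// wait_uninterruptibly() above is the VM-internal variant that does not
// respond to interrupts (it passes interruptible == false to wait()).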

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notify(THREAD);
}

// NOTE: see the comment in notify() above
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  // The ObjectMonitor* can't be async deflated until ownership is
  // dropped by the calling thread.
  ObjectMonitor* monitor = inflate(THREAD, obj(), inflate_cause_notify);
  monitor->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy variable. This store was
// unnecessary for correctness. Many threads storing into a common location
// causes considerable cache migration or "sloshing" on large SMP systems.
// As such, I avoided using OrderAccess::storestore(). In some cases
// OrderAccess::fence() -- which incurs local latency on the executing
// processor -- is a better choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;       // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;     // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: naked_yield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();       // SMP-polite spinning
    }
  }
}

// hashCode() generation:
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:
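
// For reference, the Marsaglia xor-shift generator used in the default
// branch of get_next_hash() below, in isolation (a sketch; the real code
// keeps x/y/z/w in per-thread _hashState fields seeded at thread start):
//
//   unsigned t = x ^ (x << 11);
//   x = y; y = z; z = w;
//   w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));   // w is the next hash value
//
// A properly seeded non-zero state gives this xor128 generator a period
// of 2^128 - 1.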
static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP system we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;            // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");
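
  // The retry loop below handles the three stable mark states described
  // near enter(): a neutral header can carry the hash directly; an
  // inflated monitor carries it in the displaced header (dmw); and a
  // stack-locked object must be inflated first, because the BasicLock's
  // displaced header on the owner's stack is immutable and may be read
  // asynchronously by other threads.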

  while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);

    // object should remain ineligible for biased locking
    assert(!mark.has_bias_pattern(), "invariant");

    if (mark.is_neutral()) {            // if this is a normal header
      hash = mark.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
                                        // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {               // if the hash was installed, return it
        return hash;
      }
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else if (mark.has_monitor()) {
      monitor = mark.monitor();
      temp = monitor->header();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {
        // It has a hash.

        // Separate load of dmw/header above from the loads in
        // is_being_async_deflated().
        if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
          // A non-multiple copy atomic (nMCA) machine needs a bigger
          // hammer to separate the load above and the loads below.
          OrderAccess::fence();
        } else {
          OrderAccess::loadload();
        }
        if (monitor->is_being_async_deflated()) {
          // But we can't safely use the hash if we detect that async
          // deflation has occurred. So we attempt to restore the
          // header/dmw to the object's header so that we only retry
          // once if the deflater thread happens to be slow.
          monitor->install_displaced_markword_in_object(obj);
          continue;
        }
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    } else if (self->is_lock_owned((address)mark.locker())) {
      // This is a stack lock owned by the calling thread so fetch the
      // displaced markWord from the BasicLock on the stack.
      temp = mark.displaced_mark_helper();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // WARNING:
      // The displaced header in the BasicLock on a thread's stack
      // is strictly immutable. It CANNOT be changed in ANY cases.
      // So we have to inflate the stack lock into an ObjectMonitor
      // even if the current thread owns the lock. The BasicLock on
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.

    // An async deflation can race after the inflate() call and before we
    // can update the ObjectMonitor's header with the hash value below.
    monitor = inflate(self, obj, inflate_cause_hash_code);
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                    // if it does not have a hash
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg().
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
      if (monitor->is_being_async_deflated()) {
        // If we detect that async deflation has occurred, then we
        // attempt to restore the header/dmw to the object's header
        // so that we only retry once if the deflater thread happens
        // to be slow.
        monitor->install_displaced_markword_in_object(obj);
        continue;
      }
    }
    // We finally get the hash.
    return hash;
  }
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    return thread->is_lock_owned((address)mark.locker());
  }
  // Contended case, header points to ObjectMonitor (tagged pointer)
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    return monitor->is_entered(thread) != 0;
  }
  // Unlocked case, header in place
  assert(mark.is_neutral(), "sanity check");
  return false;
}

// Be aware that this method could revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it will return
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();
  markWord mark = read_stable_mark(obj);

  // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
  if (mark.has_locker()) {
    return self->is_lock_owned((address)mark.locker()) ?
      owner_self : owner_other;
  }

  // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
  // The Object:ObjectMonitor relationship is stable as long as we're
  // not at a safepoint and AsyncDeflateIdleMonitors is false.
  if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    void* owner = monitor->owner();
    if (owner == NULL) return owner_none;
    return (owner == self ||
            self->is_lock_owned((address)owner)) ? owner_self : owner_other;
  }

  // CASE: neutral
  assert(mark.is_neutral(), "sanity check");
  return owner_none;           // it's unlocked
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();
  address owner = NULL;

  markWord mark = read_stable_mark(obj);

  // Uncontended case, header points to stack
  if (mark.has_locker()) {
    owner = (address) mark.locker();
  }

  // Contended case, header points to ObjectMonitor (tagged pointer)
  else if (mark.has_monitor()) {
    // The first stage of async deflation does not affect any field
    // used by this comparison so the ObjectMonitor* is usable here.
    ObjectMonitor* monitor = mark.monitor();
    assert(monitor != NULL, "monitor should be non-null");
    owner = (address) monitor->owner();
  }

  if (owner != NULL) {
    // owning_thread_from_monitor_owner() may also return NULL here
    return Threads::owning_thread_from_monitor_owner(t_list, owner);
  }

  // Unlocked case, header in place
  // Cannot have assertion since this object may have been
  // locked by another thread when reaching here.
  // assert(mark.is_neutral(), "sanity check");

  return NULL;
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      if (mid->object() != NULL) {
        // Only process with closure if the object is set.

        // monitors_iterate() is only called at a safepoint or when the
        // target thread is suspended or when the target thread is
        // operating on itself. The current closures in use today are
        // only interested in an owned ObjectMonitor and ownership
        // cannot be dropped under the calling contexts so the
        // ObjectMonitor cannot be async deflated.
        closure->do_monitor(mid);
      }
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
}

static bool monitors_used_above_threshold() {
  int population = Atomic::load(&om_list_globals._population);
  if (population == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = population - Atomic::load(&om_list_globals._free_count) -
                        Atomic::load(&om_list_globals._wait_count);
    int monitor_usage = (monitors_used * 100LL) / population;
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}
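
// Worked example for monitors_used_above_threshold() (a sketch, assuming
// the default MonitorUsedDeflationThreshold of 90): with
// _population = 1000, _free_count = 850 and _wait_count = 50,
// monitors_used = 1000 - 850 - 50 = 100 and monitor_usage = 10 (%),
// so no deflation is requested until usage exceeds 90%.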

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
  return false;
}

bool ObjectSynchronizer::is_safepoint_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    if (monitors_used_above_threshold()) {
      // Too many monitors in use.
      return true;
    }
    return false;
  }
  if (is_special_deflation_requested()) {
    // For AsyncDeflateIdleMonitors only do a safepoint deflation
    // if there is a special deflation request.
    return true;
  }
  return false;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // Acquire semantics not needed since we're at a safepoint.
  list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to lock ObjectMonitors for the list traversal.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from om_list_globals._free_list or a per-thread
// free list and associates them with objects. Deflation -- which occurs at
// STW-time or asynchronously -- disassociates idle monitors from objects.
// Such scavenged monitors are returned to the om_list_globals._free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the om_list_globals._free_list
// --   unassigned and on a per-thread free list
// --   assigned to an object. The object is inflated and the mark refers
//      to the ObjectMonitor.

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;
  NoSafepointVerifier nsv;

  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the
    // thread attempt to instantiate new monitors. Thread-local free lists
    // improve allocation latency, as well as reducing coherency traffic
    // on the shared global list.
    m = take_from_start_of_om_free_list(self);
    if (m != NULL) {
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      prepend_to_om_in_use_list(self, m);
      return m;
    }

    // 2: try to allocate from the global om_list_globals._free_list
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    // Acquire semantics not needed on this list load since memory
    // is already consistent due to the cmpxchg() via
    // take_from_start_of_om_free_list() above.
    if (Atomic::load(&om_list_globals._free_list) != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_global_free_list();
        if (take == NULL) {
          break;  // No more are available.
        }
        guarantee(take->object() == NULL, "invariant");
        if (AsyncDeflateIdleMonitors) {
          // We allowed 3 field values to linger during async deflation.
          // Clear or restore them as appropriate.
          take->set_header(markWord::zero());
          // DEFLATER_MARKER is the only non-NULL value we should see here.
          take->try_set_owner_from(DEFLATER_MARKER, NULL);
          if (take->contentions() < 0) {
            // Add back max_jint to restore the contentions field to its
            // proper value.
1451             take->add_to_contentions(max_jint);
1452
1453 #ifdef ASSERT
1454             jint l_contentions = take->contentions();
1455 #endif
1456             assert(l_contentions >= 0, "must not be negative: l_contentions=%d, contentions=%d",
1457                    l_contentions, take->contentions());
1458           }
1459         }
1460         take->Recycle();
1461         // Since we're taking from the global free-list, take must be Free.
1462         // om_release() also sets the allocation state to Free because it
1463         // is called from other code paths.
1464         assert(take->is_free(), "invariant");
1465         om_release(self, take, false);
1466       }
1467       self->om_free_provision += 1 + (self->om_free_provision / 2);
1468       if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;
1469       continue;
1470     }
1471
1472     // 3: allocate a block of new ObjectMonitors
1473     // Both the local and global free lists are empty -- resort to malloc().
1474     // In the current implementation ObjectMonitors are TSM - immortal.
1475     // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
1476     // each ObjectMonitor to start at the beginning of a cache line,
1477     // so we use align_up().
1478     // A better solution would be to use C++ placement-new.
1479     // BEWARE: As it stands currently, we don't run the ctors!
1480     assert(_BLOCKSIZE > 1, "invariant");
1481     size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
1482     PaddedObjectMonitor* temp;
1483     size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
1484     void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
1485     temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
1486     (void)memset((void *) temp, 0, neededsize);
1487
1488     // Format the block.
1489     // Initialize the linked list; each monitor points to its next,
1490     // forming the singly linked free list. The very first monitor
1491     // will point to the next block, which forms the block list.
1492     // The trick of using the 1st element in the block as g_block_list
1493     // linkage should be reconsidered. A better implementation would
1494     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1495
1496     for (int i = 1; i < _BLOCKSIZE; i++) {
1497       temp[i].set_next_om((ObjectMonitor*)&temp[i + 1]);
1498       assert(temp[i].is_free(), "invariant");
1499     }
1500
1501     // Terminate the last monitor as the end of the list.
1502     temp[_BLOCKSIZE - 1].set_next_om((ObjectMonitor*)NULL);
1503
1504     // Element [0] is reserved for global list linkage.
1505     temp[0].set_object(CHAINMARKER);
1506
1507     // Consider carving out this thread's current request from the
1508     // block in hand. This avoids some lock traffic and redundant
1509     // list activity.
1510
1511     prepend_block_to_lists(temp);
1512   }
1513 }
1514
1515 // Place "m" on the caller's private per-thread om_free_list.
1516 // In practice there's no need to clamp or limit the number of
1517 // monitors on a thread's om_free_list as the only non-allocation time
1518 // we'll call om_release() is to return a monitor to the free list after
1519 // a CAS attempt failed. This doesn't allow unbounded #s of monitors to
1520 // accumulate on a thread's free list.
1521
1522 // Key constraint: all ObjectMonitors on a thread's free list and the global
1523 // free list must have their object field set to null. This prevents the
1524 // scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
1525 // -- from reclaiming them while we are trying to release them.
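//
// Illustrative sketch (not part of this file's logic): the "CAS attempt
// failed" path described above, in miniature. It is a simplified version
// of what inflate() does later in this file; 'self' is the current thread
// and 'mark' is the previously read mark word of 'object'.
//
//   ObjectMonitor* m = om_alloc(self);             // from a free list
//   m->Recycle();                                  // reset monitor state
//   m->set_object(object);
//   if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
//     // Lost the race to install 'm': scrub the object field (per the
//     // key constraint above) and return 'm' to the per-thread free list.
//     m->set_object(NULL);
//     om_release(self, m, true /* from_per_thread_alloc */);
//   }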
1526 1527 void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m, 1528 bool from_per_thread_alloc) { 1529 guarantee(m->header().value() == 0, "invariant"); 1530 guarantee(m->object() == NULL, "invariant"); 1531 NoSafepointVerifier nsv; 1532 1533 if ((m->is_busy() | m->_recursions) != 0) { 1534 stringStream ss; 1535 fatal("freeing in-use monitor: %s, recursions=" INTX_FORMAT, 1536 m->is_busy_to_string(&ss), m->_recursions); 1537 } 1538 m->set_allocation_state(ObjectMonitor::Free); 1539 // _next_om is used for both per-thread in-use and free lists so 1540 // we have to remove 'm' from the in-use list first (as needed). 1541 if (from_per_thread_alloc) { 1542 // Need to remove 'm' from om_in_use_list. 1543 ObjectMonitor* mid = NULL; 1544 ObjectMonitor* next = NULL; 1545 1546 // This list walk can race with another list walker or with async 1547 // deflation so we have to worry about an ObjectMonitor being 1548 // removed from this list while we are walking it. 1549 1550 // Lock the list head to avoid racing with another list walker 1551 // or with async deflation. 1552 if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) { 1553 fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self)); 1554 } 1555 next = unmarked_next(mid); 1556 if (m == mid) { 1557 // First special case: 1558 // 'm' matches mid, is the list head and is locked. Switch the list 1559 // head to next which unlocks the list head, but leaves the extracted 1560 // mid locked. Release semantics not needed on this "unlock" since 1561 // memory is already consistent due to the get_list_head_locked() 1562 // above. 1563 Atomic::store(&self->om_in_use_list, next); 1564 } else if (m == next) { 1565 // Second special case: 1566 // 'm' matches next after the list head and we already have the list 1567 // head locked so set mid to what we are extracting: 1568 mid = next; 1569 // Lock mid to prevent races with a list walker or an async 1570 // deflater thread that's ahead of us. The locked list head 1571 // prevents races from behind us. 1572 om_lock(mid); 1573 // Update next to what follows mid (if anything): 1574 next = unmarked_next(mid); 1575 // Switch next after the list head to new next which unlocks the 1576 // list head, but leaves the extracted mid locked. Release semantics 1577 // not needed on this "unlock" since memory is already consistent 1578 // due to the get_list_head_locked() above. 1579 self->om_in_use_list->set_next_om(next); 1580 } else { 1581 // We have to search the list to find 'm'. 1582 guarantee(next != NULL, "thread=" INTPTR_FORMAT ": om_in_use_list=" INTPTR_FORMAT 1583 " is too short.", p2i(self), p2i(self->om_in_use_list)); 1584 // Our starting anchor is next after the list head which is the 1585 // last ObjectMonitor we checked: 1586 ObjectMonitor* anchor = next; 1587 // Lock anchor to prevent races with a list walker or an async 1588 // deflater thread that's ahead of us. The locked list head 1589 // prevents races from behind us. 1590 om_lock(anchor); 1591 om_unlock(mid); // Unlock the list head now that anchor is locked. 1592 while ((mid = unmarked_next(anchor)) != NULL) { 1593 if (m == mid) { 1594 // We found 'm' on the per-thread in-use list so extract it. 1595 // Update next to what follows mid (if anything): 1596 next = unmarked_next(mid); 1597 // Switch next after the anchor to new next which unlocks the 1598 // anchor, but leaves the extracted mid locked. 
Release semantics
1599           // not needed on this "unlock" since memory is already consistent
1600           // due to the om_unlock() above before entering the loop or the
1601           // om_unlock() below before looping again.
1602           anchor->set_next_om(next);
1603           break;
1604         } else {
1605           // Lock the next anchor to prevent races with a list walker
1606           // or an async deflater thread that's ahead of us. The locked
1607           // current anchor prevents races from behind us.
1608           om_lock(mid);
1609           // Unlock current anchor now that next anchor is locked:
1610           om_unlock(anchor);
1611           anchor = mid;  // Advance to new anchor and try again.
1612         }
1613       }
1614     }
1615
1616     if (mid == NULL) {
1617       // Reached end of the list and didn't find 'm' so:
1618       fatal("thread=" INTPTR_FORMAT " must find m=" INTPTR_FORMAT " on om_in_use_list="
1619             INTPTR_FORMAT, p2i(self), p2i(m), p2i(self->om_in_use_list));
1620     }
1621
1622     // At this point mid is disconnected from the in-use list so
1623     // its lock no longer has any effects on the in-use list.
1624     Atomic::dec(&self->om_in_use_count);
1625     // Unlock mid, but leave the next value for any lagging list
1626     // walkers. It will get cleaned up when mid is prepended to
1627     // the thread's free list:
1628     om_unlock(mid);
1629   }
1630
1631   prepend_to_om_free_list(self, m);
1632   guarantee(m->is_free(), "invariant");
1633 }
1634
1635 // Return ObjectMonitors on a moribund thread's free and in-use
1636 // lists to the appropriate global lists. The ObjectMonitors on the
1637 // per-thread in-use list may still be in use by other threads.
1638 //
1639 // We currently call om_flush() from Threads::remove() before the
1640 // thread has been excised from the thread list and is no longer a
1641 // mutator. This means that om_flush() cannot run concurrently with
1642 // a safepoint and interleave with deflate_idle_monitors(). In
1643 // particular, this ensures that the thread's in-use monitors are
1644 // scanned by a GC safepoint, either via Thread::oops_do() (before
1645 // om_flush() is called) or via ObjectSynchronizer::oops_do() (after
1646 // om_flush() is called).
1647 //
1648 // With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
1649 // and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
1650 // run at the same time as om_flush() so we have to follow a careful
1651 // protocol to prevent list corruption.
1652
1653 void ObjectSynchronizer::om_flush(Thread* self) {
1654   // Process the per-thread in-use list first to be consistent.
1655   int in_use_count = 0;
1656   ObjectMonitor* in_use_list = NULL;
1657   ObjectMonitor* in_use_tail = NULL;
1658   NoSafepointVerifier nsv;
1659
1660   // This function can race with a list walker or with an async
1661   // deflater thread so we lock the list head to prevent confusion.
1662   // An async deflater thread checks to see if the target thread
1663   // is exiting, but if it has made it past that check before we
1664   // started exiting, then it is racing to get to the in-use list.
1665   if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
1666     // At this point, we have locked the in-use list head so a racing
1667     // thread cannot come in after us. However, a racing thread could
1668     // be ahead of us; we'll detect that and delay to let it finish.
1669     //
1670     // The thread is going away; however, the ObjectMonitors on the
1671     // om_in_use_list may still be in-use by other threads. Link
1672     // them to in_use_tail, which will be linked into the global
1673     // in-use list (om_list_globals._in_use_list) below.
1674 // 1675 // Account for the in-use list head before the loop since it is 1676 // already locked (by this thread): 1677 in_use_tail = in_use_list; 1678 in_use_count++; 1679 for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) { 1680 if (is_locked(cur_om)) { 1681 // cur_om is locked so there must be a racing walker or async 1682 // deflater thread ahead of us so we'll give it a chance to finish. 1683 while (is_locked(cur_om)) { 1684 os::naked_short_sleep(1); 1685 } 1686 // Refetch the possibly changed next field and try again. 1687 cur_om = unmarked_next(in_use_tail); 1688 continue; 1689 } 1690 if (cur_om->object() == NULL) { 1691 // cur_om was deflated and the object ref was cleared while it 1692 // was locked. We happened to see it just after it was unlocked 1693 // (and added to the free list). Refetch the possibly changed 1694 // next field and try again. 1695 cur_om = unmarked_next(in_use_tail); 1696 continue; 1697 } 1698 in_use_tail = cur_om; 1699 in_use_count++; 1700 cur_om = unmarked_next(cur_om); 1701 } 1702 guarantee(in_use_tail != NULL, "invariant"); 1703 int l_om_in_use_count = Atomic::load(&self->om_in_use_count); 1704 ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't match: " 1705 "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count); 1706 Atomic::store(&self->om_in_use_count, 0); 1707 OrderAccess::storestore(); // Make sure counter update is seen first. 1708 // Clear the in-use list head (which also unlocks it): 1709 Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL); 1710 om_unlock(in_use_list); 1711 } 1712 1713 int free_count = 0; 1714 ObjectMonitor* free_list = NULL; 1715 ObjectMonitor* free_tail = NULL; 1716 // This function can race with a list walker thread so we lock the 1717 // list head to prevent confusion. 1718 if ((free_list = get_list_head_locked(&self->om_free_list)) != NULL) { 1719 // At this point, we have locked the free list head so a racing 1720 // thread cannot come in after us. However, a racing thread could 1721 // be ahead of us; we'll detect that and delay to let it finish. 1722 // 1723 // The thread is going away. Set 'free_tail' to the last per-thread free 1724 // monitor which will be linked to om_list_globals._free_list below. 1725 // 1726 // Account for the free list head before the loop since it is 1727 // already locked (by this thread): 1728 free_tail = free_list; 1729 free_count++; 1730 for (ObjectMonitor* s = unmarked_next(free_list); s != NULL; s = unmarked_next(s)) { 1731 if (is_locked(s)) { 1732 // s is locked so there must be a racing walker thread ahead 1733 // of us so we'll give it a chance to finish. 1734 while (is_locked(s)) { 1735 os::naked_short_sleep(1); 1736 } 1737 } 1738 free_tail = s; 1739 free_count++; 1740 guarantee(s->object() == NULL, "invariant"); 1741 if (s->is_busy()) { 1742 stringStream ss; 1743 fatal("must be !is_busy: %s", s->is_busy_to_string(&ss)); 1744 } 1745 } 1746 guarantee(free_tail != NULL, "invariant"); 1747 int l_om_free_count = Atomic::load(&self->om_free_count); 1748 ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: " 1749 "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count); 1750 Atomic::store(&self->om_free_count, 0); 1751 OrderAccess::storestore(); // Make sure counter update is seen first. 
1752 Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL); 1753 om_unlock(free_list); 1754 } 1755 1756 if (free_tail != NULL) { 1757 prepend_list_to_global_free_list(free_list, free_tail, free_count); 1758 } 1759 1760 if (in_use_tail != NULL) { 1761 prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count); 1762 } 1763 1764 LogStreamHandle(Debug, monitorinflation) lsh_debug; 1765 LogStreamHandle(Info, monitorinflation) lsh_info; 1766 LogStream* ls = NULL; 1767 if (log_is_enabled(Debug, monitorinflation)) { 1768 ls = &lsh_debug; 1769 } else if ((free_count != 0 || in_use_count != 0) && 1770 log_is_enabled(Info, monitorinflation)) { 1771 ls = &lsh_info; 1772 } 1773 if (ls != NULL) { 1774 ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d" 1775 ", in_use_count=%d" ", om_free_provision=%d", 1776 p2i(self), free_count, in_use_count, self->om_free_provision); 1777 } 1778 } 1779 1780 static void post_monitor_inflate_event(EventJavaMonitorInflate* event, 1781 const oop obj, 1782 ObjectSynchronizer::InflateCause cause) { 1783 assert(event != NULL, "invariant"); 1784 assert(event->should_commit(), "invariant"); 1785 event->set_monitorClass(obj->klass()); 1786 event->set_address((uintptr_t)(void*)obj); 1787 event->set_cause((u1)cause); 1788 event->commit(); 1789 } 1790 1791 // Fast path code shared by multiple functions 1792 void ObjectSynchronizer::inflate_helper(oop obj) { 1793 markWord mark = obj->mark(); 1794 if (mark.has_monitor()) { 1795 ObjectMonitor* monitor = mark.monitor(); 1796 assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor=" INTPTR_FORMAT " is invalid", p2i(monitor)); 1797 markWord dmw = monitor->header(); 1798 assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value()); 1799 return; 1800 } 1801 (void)inflate(Thread::current(), obj, inflate_cause_vm_internal); 1802 } 1803 1804 ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object, 1805 const InflateCause cause) { 1806 // Inflate mutates the heap ... 1807 // Relaxing assertion for bug 6320749. 1808 assert(Universe::verify_in_progress() || 1809 !SafepointSynchronize::is_at_safepoint(), "invariant"); 1810 1811 EventJavaMonitorInflate event; 1812 1813 for (;;) { 1814 const markWord mark = object->mark(); 1815 assert(!mark.has_bias_pattern(), "invariant"); 1816 1817 // The mark can be in one of the following states: 1818 // * Inflated - just return 1819 // * Stack-locked - coerce it to inflated 1820 // * INFLATING - busy wait for conversion to complete 1821 // * Neutral - aggressively inflate the object. 1822 // * BIASED - Illegal. We should never see this 1823 1824 // CASE: inflated 1825 if (mark.has_monitor()) { 1826 ObjectMonitor* inf = mark.monitor(); 1827 markWord dmw = inf->header(); 1828 assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); 1829 assert(AsyncDeflateIdleMonitors || inf->object() == object, "invariant"); 1830 assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid"); 1831 return inf; 1832 } 1833 1834 // CASE: inflation in progress - inflating over a stack-lock. 1835 // Some other thread is converting from stack-locked to inflated. 1836 // Only that thread can complete inflation -- other threads must wait. 1837 // The INFLATING value is transient. 1838 // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish. 1839 // We could always eliminate polling by parking the thread on some auxiliary list. 
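//
// Conceptually, the polling mentioned above amounts to the following
// sketch; the real read_stable_mark() used just below bounds the
// busy-wait with spin/yield/park stages and the gInflationLocks:
//
//   while (object->mark() == markWord::INFLATING()) {
//     os::naked_short_sleep(1);  // let the inflating thread finish
//   }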
1840     if (mark == markWord::INFLATING()) {
1841       read_stable_mark(object);
1842       continue;
1843     }
1844
1845     // CASE: stack-locked
1846     // Could be stack-locked either by this thread or by some other thread.
1847     //
1848     // Note that we allocate the objectmonitor speculatively, _before_ attempting
1849     // to install INFLATING into the mark word. We originally installed INFLATING,
1850     // allocated the objectmonitor, and then finally STed the address of the
1851     // objectmonitor into the mark. This was correct, but artificially lengthened
1852     // the interval in which INFLATING appeared in the mark, thus increasing
1853     // the odds of inflation contention.
1854     //
1855     // We now use per-thread private objectmonitor free lists.
1856     // These lists are reprovisioned from the global free list outside the
1857     // critical INFLATING...ST interval. A thread can transfer
1858     // multiple objectmonitors en masse from the global free list to its local free list.
1859     // This reduces coherency traffic and lock contention on the global free list.
1860     // Using such local free lists, it doesn't matter if the om_alloc() call appears
1861     // before or after the CAS(INFLATING) operation.
1862     // See the comments in om_alloc().
1863
1864     LogStreamHandle(Trace, monitorinflation) lsh;
1865
1866     if (mark.has_locker()) {
1867       ObjectMonitor* m = om_alloc(self);
1868       // Optimistically prepare the objectmonitor - anticipate successful CAS.
1869       // We do this before the CAS in order to minimize the length of time
1870       // in which INFLATING appears in the mark.
1871       m->Recycle();
1872       m->_Responsible = NULL;
1873       m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;  // Consider: maintain by type/class
1874
1875       markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
1876       if (cmp != mark) {
1877         // om_release() will reset the allocation state from New to Free.
1878         om_release(self, m, true);
1879         continue;  // Interference -- just retry
1880       }
1881
1882       // We've successfully installed INFLATING (0) into the mark-word.
1883       // This is the only case where 0 will appear in a mark-word.
1884       // Only the singular thread that successfully swings the mark-word
1885       // to 0 can perform (or more precisely, complete) inflation.
1886       //
1887       // Why do we CAS a 0 into the mark-word instead of just CASing the
1888       // mark-word from the stack-locked value directly to the new inflated state?
1889       // Consider what happens when a thread unlocks a stack-locked object.
1890       // It attempts to use CAS to swing the displaced header value from the
1891       // on-stack BasicLock back into the object header. Recall also that the
1892       // header value (hash code, etc) can reside in (a) the object header, or
1893       // (b) a displaced header associated with the stack-lock, or (c) a displaced
1894       // header in an ObjectMonitor. The inflate() routine must copy the header
1895       // value from the BasicLock on the owner's stack to the ObjectMonitor, all
1896       // the while preserving the hashCode stability invariants. If the owner
1897       // decides to release the lock while the value is 0, the unlock will fail
1898       // and control will eventually pass from slow_exit() to inflate(). The owner
1899       // will then spin, waiting for the 0 value to disappear. Put another way,
1900       // the 0 causes the owner to stall if the owner happens to try to
1901       // drop the lock (restoring the header from the BasicLock to the object)
1902       // while inflation is in-progress. This protocol avoids races that might
1903       // otherwise permit hashCode values to change or "flicker" for an object.
1904       // Critically, while object->mark is 0, mark.displaced_mark_helper() is stable.
1905       // 0 serves as a "BUSY" inflate-in-progress indicator.
1906
1907
1908       // Fetch the displaced mark from the owner's stack.
1909       // The owner can't die or unwind past the lock while our INFLATING
1910       // object is in the mark. Furthermore, the owner can't complete
1911       // an unlock on the object, either.
1912       markWord dmw = mark.displaced_mark_helper();
1913       // Catch if the object's header is not neutral (not locked and
1914       // not marked is what we care about here).
1915       ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
1916
1917       // Set up the monitor fields to proper values -- prepare the monitor.
1918       m->set_header(dmw);
1919
1920       // Optimization: if the mark.locker stack address is associated
1921       // with this thread we could simply set m->_owner = self.
1922       // Note that a thread can inflate an object
1923       // that it has stack-locked -- as might happen in wait() -- directly
1924       // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
1925       if (AsyncDeflateIdleMonitors) {
1926         m->set_owner_from(NULL, DEFLATER_MARKER, mark.locker());
1927       } else {
1928         m->set_owner_from(NULL, mark.locker());
1929       }
1930       m->set_object(object);
1931       // TODO-FIXME: assert BasicLock->dhw != 0.
1932
1933       // Must preserve store ordering. The monitor state must
1934       // be stable at the time of publishing the monitor address.
1935       guarantee(object->mark() == markWord::INFLATING(), "invariant");
1936       // Release semantics so that the above set_object() is seen first.
1937       object->release_set_mark(markWord::encode(m));
1938
1939       // Once the ObjectMonitor is configured and the object is associated
1940       // with the ObjectMonitor, it is safe to allow async deflation:
1941       assert(m->is_new(), "freshly allocated monitor must be new");
1942       // Release semantics needed to keep allocation_state from floating up.
1943       m->release_set_allocation_state(ObjectMonitor::Old);
1944
1945       // Hopefully the performance counters are allocated on distinct cache lines
1946       // to avoid false sharing on MP systems ...
1947       OM_PERFDATA_OP(Inflations, inc());
1948       if (log_is_enabled(Trace, monitorinflation)) {
1949         ResourceMark rm(self);
1950         lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
1951                      INTPTR_FORMAT ", type='%s'", p2i(object),
1952                      object->mark().value(), object->klass()->external_name());
1953       }
1954       if (event.should_commit()) {
1955         post_monitor_inflate_event(&event, object, cause);
1956       }
1957       return m;
1958     }
1959
1960     // CASE: neutral
1961     // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
1962     // If we know we're inflating for entry it's better to inflate by swinging a
1963     // pre-locked ObjectMonitor pointer into the object header. A successful
1964     // CAS inflates the object *and* confers ownership to the inflating thread.
1965     // In the current implementation we use a 2-step mechanism where we CAS()
1966     // to inflate and then CAS() again to try to swing _owner from NULL to self.
1967     // An inflateTry() method that we could call from enter() would be useful.
1968
1969     // Catch if the object's header is not neutral (not locked and
1970     // not marked is what we care about here).
1971 ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value()); 1972 ObjectMonitor* m = om_alloc(self); 1973 // prepare m for installation - set monitor to initial state 1974 m->Recycle(); 1975 m->set_header(mark); 1976 if (AsyncDeflateIdleMonitors) { 1977 // DEFLATER_MARKER is the only non-NULL value we should see here. 1978 m->try_set_owner_from(DEFLATER_MARKER, NULL); 1979 } 1980 m->set_object(object); 1981 m->_Responsible = NULL; 1982 m->_SpinDuration = ObjectMonitor::Knob_SpinLimit; // consider: keep metastats by type/class 1983 1984 if (object->cas_set_mark(markWord::encode(m), mark) != mark) { 1985 m->set_header(markWord::zero()); 1986 m->set_object(NULL); 1987 m->Recycle(); 1988 // om_release() will reset the allocation state from New to Free. 1989 om_release(self, m, true); 1990 m = NULL; 1991 continue; 1992 // interference - the markword changed - just retry. 1993 // The state-transitions are one-way, so there's no chance of 1994 // live-lock -- "Inflated" is an absorbing state. 1995 } 1996 1997 // Once the ObjectMonitor is configured and object is associated 1998 // with the ObjectMonitor, it is safe to allow async deflation: 1999 assert(m->is_new(), "freshly allocated monitor must be new"); 2000 // Release semantics are not needed to keep allocation_state from 2001 // floating up since cas_set_mark() takes care of it. 2002 m->set_allocation_state(ObjectMonitor::Old); 2003 2004 // Hopefully the performance counters are allocated on distinct 2005 // cache lines to avoid false sharing on MP systems ... 2006 OM_PERFDATA_OP(Inflations, inc()); 2007 if (log_is_enabled(Trace, monitorinflation)) { 2008 ResourceMark rm(self); 2009 lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark=" 2010 INTPTR_FORMAT ", type='%s'", p2i(object), 2011 object->mark().value(), object->klass()->external_name()); 2012 } 2013 if (event.should_commit()) { 2014 post_monitor_inflate_event(&event, object, cause); 2015 } 2016 return m; 2017 } 2018 } 2019 2020 2021 // We maintain a list of in-use monitors for each thread. 2022 // 2023 // For safepoint based deflation: 2024 // deflate_thread_local_monitors() scans a single thread's in-use list, while 2025 // deflate_idle_monitors() scans only a global list of in-use monitors which 2026 // is populated only as a thread dies (see om_flush()). 2027 // 2028 // These operations are called at all safepoints, immediately after mutators 2029 // are stopped, but before any objects have moved. Collectively they traverse 2030 // the population of in-use monitors, deflating where possible. The scavenged 2031 // monitors are returned to the global monitor free list. 2032 // 2033 // Beware that we scavenge at *every* stop-the-world point. Having a large 2034 // number of monitors in-use could negatively impact performance. We also want 2035 // to minimize the total # of monitors in circulation, as they incur a small 2036 // footprint penalty. 2037 // 2038 // Perversely, the heap size -- and thus the STW safepoint rate -- 2039 // typically drives the scavenge rate. Large heaps can mean infrequent GC, 2040 // which in turn can mean large(r) numbers of ObjectMonitors in circulation. 2041 // This is an unfortunate aspect of this design. 2042 // 2043 // For async deflation: 2044 // If a special deflation request is made, then the safepoint based 2045 // deflation mechanism is used. Otherwise, an async deflation request 2046 // is registered with the ServiceThread and it is notified. 
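//
// Sketch of the consumer side of that request (simplified and assumed;
// the real loop lives in the ServiceThread, not in this file -- only the
// two ObjectSynchronizer calls below are taken from this file):
//
//   while (true) {
//     {
//       MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
//       while (!ObjectSynchronizer::is_async_deflation_needed() /* && ... */) {
//         ml.wait();
//       }
//     }
//     if (ObjectSynchronizer::is_async_deflation_needed()) {
//       ObjectSynchronizer::deflate_idle_monitors_using_JT();
//     }
//   }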
2047 2048 void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) { 2049 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2050 2051 // The per-thread in-use lists are handled in 2052 // ParallelSPCleanupThreadClosure::do_thread(). 2053 2054 if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) { 2055 // Use the older mechanism for the global in-use list or if a 2056 // special deflation has been requested before the safepoint. 2057 ObjectSynchronizer::deflate_idle_monitors(counters); 2058 return; 2059 } 2060 2061 log_debug(monitorinflation)("requesting async deflation of idle monitors."); 2062 // Request deflation of idle monitors by the ServiceThread: 2063 set_is_async_deflation_requested(true); 2064 MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag); 2065 ml.notify_all(); 2066 2067 if (log_is_enabled(Debug, monitorinflation)) { 2068 // exit_globals()'s call to audit_and_print_stats() is done 2069 // at the Info level and not at a safepoint. 2070 // For safepoint based deflation, audit_and_print_stats() is called 2071 // in ObjectSynchronizer::finish_deflate_idle_monitors() at the 2072 // Debug level at a safepoint. 2073 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); 2074 } 2075 } 2076 2077 // Deflate a single monitor if not in-use 2078 // Return true if deflated, false if in-use 2079 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj, 2080 ObjectMonitor** free_head_p, 2081 ObjectMonitor** free_tail_p) { 2082 bool deflated; 2083 // Normal case ... The monitor is associated with obj. 2084 const markWord mark = obj->mark(); 2085 guarantee(mark == markWord::encode(mid), "should match: mark=" 2086 INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(), 2087 markWord::encode(mid).value()); 2088 // Make sure that mark.monitor() and markWord::encode() agree: 2089 guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT 2090 ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid)); 2091 const markWord dmw = mid->header(); 2092 guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value()); 2093 2094 if (mid->is_busy()) { 2095 // Easy checks are first - the ObjectMonitor is busy so no deflation. 2096 deflated = false; 2097 } else { 2098 // Deflate the monitor if it is no longer being used 2099 // It's idle - scavenge and return to the global free list 2100 // plain old deflation ... 2101 if (log_is_enabled(Trace, monitorinflation)) { 2102 ResourceMark rm; 2103 log_trace(monitorinflation)("deflate_monitor: " 2104 "object=" INTPTR_FORMAT ", mark=" 2105 INTPTR_FORMAT ", type='%s'", p2i(obj), 2106 mark.value(), obj->klass()->external_name()); 2107 } 2108 2109 // Restore the header back to obj 2110 // XXX - I have no rationale for this "release", but it's been here forever. 2111 obj->release_set_mark(dmw); 2112 if (AsyncDeflateIdleMonitors) { 2113 // clear() expects the owner field to be NULL. 2114 // DEFLATER_MARKER is the only non-NULL value we should see here. 2115 mid->try_set_owner_from(DEFLATER_MARKER, NULL); 2116 } 2117 mid->clear(); 2118 2119 assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT, 2120 p2i(mid->object())); 2121 assert(mid->is_free(), "invariant"); 2122 2123 // Move the deflated ObjectMonitor to the working free list 2124 // defined by free_head_p and free_tail_p. 
2125 if (*free_head_p == NULL) *free_head_p = mid; 2126 if (*free_tail_p != NULL) { 2127 // We append to the list so the caller can use mid->_next_om 2128 // to fix the linkages in its context. 2129 ObjectMonitor* prevtail = *free_tail_p; 2130 // Should have been cleaned up by the caller: 2131 // Note: Should not have to lock prevtail here since we're at a 2132 // safepoint and ObjectMonitors on the local free list should 2133 // not be accessed in parallel. 2134 #ifdef ASSERT 2135 ObjectMonitor* l_next_om = prevtail->next_om(); 2136 #endif 2137 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2138 prevtail->set_next_om(mid); 2139 } 2140 *free_tail_p = mid; 2141 // At this point, mid->_next_om still refers to its current 2142 // value and another ObjectMonitor's _next_om field still 2143 // refers to this ObjectMonitor. Those linkages have to be 2144 // cleaned up by the caller who has the complete context. 2145 deflated = true; 2146 } 2147 return deflated; 2148 } 2149 2150 // Deflate the specified ObjectMonitor if not in-use using a JavaThread. 2151 // Returns true if it was deflated and false otherwise. 2152 // 2153 // The async deflation protocol sets owner to DEFLATER_MARKER and 2154 // makes contentions negative as signals to contending threads that 2155 // an async deflation is in progress. There are a number of checks 2156 // as part of the protocol to make sure that the calling thread has 2157 // not lost the race to a contending thread. 2158 // 2159 // The ObjectMonitor has been successfully async deflated when: 2160 // (contentions < 0) 2161 // Contending threads that see that condition know to retry their operation. 2162 // 2163 bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid, 2164 ObjectMonitor** free_head_p, 2165 ObjectMonitor** free_tail_p) { 2166 assert(AsyncDeflateIdleMonitors, "sanity check"); 2167 assert(Thread::current()->is_Java_thread(), "precondition"); 2168 // A newly allocated ObjectMonitor should not be seen here so we 2169 // avoid an endless inflate/deflate cycle. 2170 assert(mid->is_old(), "must be old: allocation_state=%d", 2171 (int) mid->allocation_state()); 2172 2173 if (mid->is_busy()) { 2174 // Easy checks are first - the ObjectMonitor is busy so no deflation. 2175 return false; 2176 } 2177 2178 // Set a NULL owner to DEFLATER_MARKER to force any contending thread 2179 // through the slow path. This is just the first part of the async 2180 // deflation dance. 2181 if (mid->try_set_owner_from(NULL, DEFLATER_MARKER) != NULL) { 2182 // The owner field is no longer NULL so we lost the race since the 2183 // ObjectMonitor is now busy. 2184 return false; 2185 } 2186 2187 if (mid->contentions() > 0 || mid->_waiters != 0) { 2188 // Another thread has raced to enter the ObjectMonitor after 2189 // mid->is_busy() above or has already entered and waited on 2190 // it which makes it busy so no deflation. Restore owner to 2191 // NULL if it is still DEFLATER_MARKER. 2192 if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) { 2193 // Deferred decrement for the JT EnterI() that cancelled the async deflation. 2194 mid->add_to_contentions(-1); 2195 } 2196 return false; 2197 } 2198 2199 // Make a zero contentions field negative to force any contending threads 2200 // to retry. This is the second part of the async deflation dance. 2201 if (Atomic::cmpxchg(&mid->_contentions, (jint)0, -max_jint) != 0) { 2202 // Contentions was no longer 0 so we lost the race since the 2203 // ObjectMonitor is now busy. 
Restore owner to NULL if it is
2204     // still DEFLATER_MARKER:
2205     if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) != DEFLATER_MARKER) {
2206       // Deferred decrement for the JT EnterI() that cancelled the async deflation.
2207       mid->add_to_contentions(-1);
2208     }
2209     return false;
2210   }
2211
2212   // Sanity checks for the races:
2213   guarantee(mid->owner_is_DEFLATER_MARKER(), "must be deflater marker");
2214   guarantee(mid->contentions() < 0, "must be negative: contentions=%d",
2215             mid->contentions());
2216   guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
2217   guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
2218             INTPTR_FORMAT, p2i(mid->_cxq));
2219   guarantee(mid->_EntryList == NULL,
2220             "must be no entering threads: EntryList=" INTPTR_FORMAT,
2221             p2i(mid->_EntryList));
2222
2223   const oop obj = (oop) mid->object();
2224   if (log_is_enabled(Trace, monitorinflation)) {
2225     ResourceMark rm;
2226     log_trace(monitorinflation)("deflate_monitor_using_JT: "
2227                                 "object=" INTPTR_FORMAT ", mark="
2228                                 INTPTR_FORMAT ", type='%s'",
2229                                 p2i(obj), obj->mark().value(),
2230                                 obj->klass()->external_name());
2231   }
2232
2233   // Install the old mark word if nobody else has already done it.
2234   mid->install_displaced_markword_in_object(obj);
2235   mid->clear_common();
2236
2237   assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
2238          p2i(mid->object()));
2239   assert(mid->is_free(), "must be free: allocation_state=%d",
2240          (int)mid->allocation_state());
2241
2242   // Move the deflated ObjectMonitor to the working free list
2243   // defined by free_head_p and free_tail_p.
2244   if (*free_head_p == NULL) {
2245     // First one on the list.
2246     *free_head_p = mid;
2247   }
2248   if (*free_tail_p != NULL) {
2249     // We append to the list so the caller can use mid->_next_om
2250     // to fix the linkages in its context.
2251     ObjectMonitor* prevtail = *free_tail_p;
2252     // prevtail should have been cleaned up by the caller:
2253 #ifdef ASSERT
2254     ObjectMonitor* l_next_om = unmarked_next(prevtail);
2255 #endif
2256     assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
2257     om_lock(prevtail);
2258     prevtail->set_next_om(mid);  // prevtail now points to mid (and is unlocked)
2259   }
2260   *free_tail_p = mid;
2261
2262   // At this point, mid->_next_om still refers to its current
2263   // value and another ObjectMonitor's _next_om field still
2264   // refers to this ObjectMonitor. Those linkages have to be
2265   // cleaned up by the caller who has the complete context.
2266
2267   // We leave owner == DEFLATER_MARKER and contentions < 0
2268   // to force any racing threads to retry.
2269   return true;  // Success, ObjectMonitor has been deflated.
2270 }
2271
2272 // Walk a given monitor list and deflate idle monitors.
2273 // The given list could be a per-thread list or a global list.
2274 //
2275 // In the case of parallel processing of thread-local monitor lists,
2276 // work is done by Threads::parallel_threads_do(), which ensures that
2277 // each Java thread is processed by exactly one worker thread, and
2278 // thus avoids the conflicts that would arise if worker threads
2279 // processed the same monitor lists concurrently.
2280 //
2281 // See also ParallelSPCleanupTask and
2282 // SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
2283 // Threads::parallel_java_threads_do() in thread.cpp.
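//
// The unlink performed by deflate_monitor_list() below is the classic
// single-pass removal from a singly linked list. In miniature
// (illustrative types and names, not HotSpot code):
//
//   struct Node { Node* next; };
//
//   void remove_matching(Node** head, bool (*should_remove)(Node*)) {
//     Node* prev = NULL;
//     Node* next = NULL;
//     for (Node* cur = *head; cur != NULL; cur = next) {
//       next = cur->next;
//       if (should_remove(cur)) {
//         if (prev == NULL) {
//           *head = next;        // unlinking the list head
//         } else {
//           prev->next = next;   // bypass 'cur'
//         }
//       } else {
//         prev = cur;            // 'cur' stays on the list
//       }
//     }
//   }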
2284 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p, 2285 int* count_p, 2286 ObjectMonitor** free_head_p, 2287 ObjectMonitor** free_tail_p) { 2288 ObjectMonitor* cur_mid_in_use = NULL; 2289 ObjectMonitor* mid = NULL; 2290 ObjectMonitor* next = NULL; 2291 int deflated_count = 0; 2292 2293 // This list walk executes at a safepoint and does not race with any 2294 // other list walkers. 2295 2296 for (mid = Atomic::load(list_p); mid != NULL; mid = next) { 2297 next = unmarked_next(mid); 2298 oop obj = (oop) mid->object(); 2299 if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) { 2300 // Deflation succeeded and already updated free_head_p and 2301 // free_tail_p as needed. Finish the move to the local free list 2302 // by unlinking mid from the global or per-thread in-use list. 2303 if (cur_mid_in_use == NULL) { 2304 // mid is the list head so switch the list head to next: 2305 Atomic::store(list_p, next); 2306 } else { 2307 // Switch cur_mid_in_use's next field to next: 2308 cur_mid_in_use->set_next_om(next); 2309 } 2310 // At this point mid is disconnected from the in-use list. 2311 deflated_count++; 2312 Atomic::dec(count_p); 2313 // mid is current tail in the free_head_p list so NULL terminate it. 2314 // No release semantics needed since Atomic::dec() already provides it. 2315 mid->set_next_om(NULL); 2316 } else { 2317 cur_mid_in_use = mid; 2318 } 2319 } 2320 return deflated_count; 2321 } 2322 2323 // Walk a given ObjectMonitor list and deflate idle ObjectMonitors using 2324 // a JavaThread. Returns the number of deflated ObjectMonitors. The given 2325 // list could be a per-thread in-use list or the global in-use list. 2326 // If a safepoint has started, then we save state via saved_mid_in_use_p 2327 // and return to the caller to honor the safepoint. 2328 // 2329 int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p, 2330 int* count_p, 2331 ObjectMonitor** free_head_p, 2332 ObjectMonitor** free_tail_p, 2333 ObjectMonitor** saved_mid_in_use_p) { 2334 assert(AsyncDeflateIdleMonitors, "sanity check"); 2335 JavaThread* self = JavaThread::current(); 2336 2337 ObjectMonitor* cur_mid_in_use = NULL; 2338 ObjectMonitor* mid = NULL; 2339 ObjectMonitor* next = NULL; 2340 ObjectMonitor* next_next = NULL; 2341 int deflated_count = 0; 2342 NoSafepointVerifier nsv; 2343 2344 // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go 2345 // protocol because om_release() can do list deletions in parallel; 2346 // this also prevents races with a list walker thread. We also 2347 // lock-next-next-as-we-go to prevent an om_flush() that is behind 2348 // this thread from passing us. 2349 if (*saved_mid_in_use_p == NULL) { 2350 // No saved state so start at the beginning. 2351 // Lock the list head so we can possibly deflate it: 2352 if ((mid = get_list_head_locked(list_p)) == NULL) { 2353 return 0; // The list is empty so nothing to deflate. 2354 } 2355 next = unmarked_next(mid); 2356 } else { 2357 // We're restarting after a safepoint so restore the necessary state 2358 // before we resume. 2359 cur_mid_in_use = *saved_mid_in_use_p; 2360 // Lock cur_mid_in_use so we can possibly update its 2361 // next field to extract a deflated ObjectMonitor. 2362 om_lock(cur_mid_in_use); 2363 mid = unmarked_next(cur_mid_in_use); 2364 if (mid == NULL) { 2365 om_unlock(cur_mid_in_use); 2366 *saved_mid_in_use_p = NULL; 2367 return 0; // The remainder is empty so nothing more to deflate. 
2368     }
2369     // Lock mid so we can possibly deflate it:
2370     om_lock(mid);
2371     next = unmarked_next(mid);
2372   }
2373
2374   while (true) {
2375     // The current mid is locked at this point. If we have a
2376     // cur_mid_in_use, then it is also locked at this point.
2377
2378     if (next != NULL) {
2379       // We lock next so that an om_flush() thread that is behind us
2380       // cannot pass us when we unlock the current mid.
2381       om_lock(next);
2382       next_next = unmarked_next(next);
2383     }
2384
2385     // Only try to deflate if there is an associated Java object and if
2386     // mid is old (is not newly allocated and is not newly freed).
2387     if (mid->object() != NULL && mid->is_old() &&
2388         deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
2389       // Deflation succeeded and already updated free_head_p and
2390       // free_tail_p as needed. Finish the move to the local free list
2391       // by unlinking mid from the global or per-thread in-use list.
2392       if (cur_mid_in_use == NULL) {
2393         // mid is the list head and it is locked. Switch the list head
2394         // to next which is also locked (if not NULL) and also leave
2395         // mid locked. Release semantics needed since not all code paths
2396         // in deflate_monitor_using_JT() ensure memory consistency.
2397         Atomic::release_store(list_p, next);
2398       } else {
2399         ObjectMonitor* locked_next = mark_om_ptr(next);
2400         // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
2401         // next field to locked_next and also leave mid locked.
2402         // Release semantics needed since not all code paths in
2403         // deflate_monitor_using_JT() ensure memory consistency.
2404         cur_mid_in_use->release_set_next_om(locked_next);
2405       }
2406       // At this point mid is disconnected from the in-use list so
2407       // its lock no longer has any effects on the in-use list.
2408       deflated_count++;
2409       Atomic::dec(count_p);
2410       // mid is current tail in the free_head_p list so NULL terminate
2411       // it (which also unlocks it). No release semantics needed since
2412       // Atomic::dec() already provides it.
2413       mid->set_next_om(NULL);
2414
2415       // All the list management is done so move on to the next one:
2416       mid = next;  // mid keeps non-NULL next's locked state
2417       next = next_next;
2418     } else {
2419       // mid is considered in-use if it does not have an associated
2420       // Java object or mid is not old or deflation did not succeed.
2421       // A mid->is_new() node can be seen here when it is freshly
2422       // returned by om_alloc() (and skips the deflation code path).
2423       // A mid->is_old() node can be seen here when deflation failed.
2424       // A mid->is_free() node can be seen here when a fresh node from
2425       // om_alloc() is released by om_release() due to losing the race
2426       // in inflate().
2427
2428       // All the list management is done so move on to the next one:
2429       if (cur_mid_in_use != NULL) {
2430         om_unlock(cur_mid_in_use);
2431       }
2432       // The next cur_mid_in_use keeps mid's lock state so
2433       // that it is stable for a possible next field change. It
2434       // cannot be modified by om_release() while it is locked.
2435       cur_mid_in_use = mid;
2436       mid = next;  // mid keeps non-NULL next's locked state
2437       next = next_next;
2438
2439       if (SafepointMechanism::should_block(self) &&
2440           // Acquire semantics are not needed on this list load since
2441           // it is not dependent on the following load which does have
2442           // acquire semantics.
2443 cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) { 2444 // If a safepoint has started and cur_mid_in_use is not the list 2445 // head and is old, then it is safe to use as saved state. Return 2446 // to the caller before blocking. 2447 *saved_mid_in_use_p = cur_mid_in_use; 2448 om_unlock(cur_mid_in_use); 2449 if (mid != NULL) { 2450 om_unlock(mid); 2451 } 2452 return deflated_count; 2453 } 2454 } 2455 if (mid == NULL) { 2456 if (cur_mid_in_use != NULL) { 2457 om_unlock(cur_mid_in_use); 2458 } 2459 break; // Reached end of the list so nothing more to deflate. 2460 } 2461 2462 // The current mid's next field is locked at this point. If we have 2463 // a cur_mid_in_use, then it is also locked at this point. 2464 } 2465 // We finished the list without a safepoint starting so there's 2466 // no need to save state. 2467 *saved_mid_in_use_p = NULL; 2468 return deflated_count; 2469 } 2470 2471 void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) { 2472 counters->n_in_use = 0; // currently associated with objects 2473 counters->n_in_circulation = 0; // extant 2474 counters->n_scavenged = 0; // reclaimed (global and per-thread) 2475 counters->per_thread_scavenged = 0; // per-thread scavenge total 2476 counters->per_thread_times = 0.0; // per-thread scavenge times 2477 } 2478 2479 void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) { 2480 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2481 2482 if (AsyncDeflateIdleMonitors) { 2483 // Nothing to do when global idle ObjectMonitors are deflated using 2484 // a JavaThread unless a special deflation has been requested. 2485 if (!is_special_deflation_requested()) { 2486 return; 2487 } 2488 } 2489 2490 bool deflated = false; 2491 2492 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors 2493 ObjectMonitor* free_tail_p = NULL; 2494 elapsedTimer timer; 2495 2496 if (log_is_enabled(Info, monitorinflation)) { 2497 timer.start(); 2498 } 2499 2500 // Note: the thread-local monitors lists get deflated in 2501 // a separate pass. See deflate_thread_local_monitors(). 2502 2503 // For moribund threads, scan om_list_globals._in_use_list 2504 int deflated_count = 0; 2505 // Acquire semantics not needed since we are at a safepoint. 2506 if (Atomic::load(&om_list_globals._in_use_list) != NULL) { 2507 // Update n_in_circulation before om_list_globals._in_use_count is 2508 // updated by deflation. 2509 Atomic::add(&counters->n_in_circulation, 2510 Atomic::load(&om_list_globals._in_use_count)); 2511 2512 deflated_count = deflate_monitor_list(&om_list_globals._in_use_list, 2513 &om_list_globals._in_use_count, 2514 &free_head_p, &free_tail_p); 2515 Atomic::add(&counters->n_in_use, Atomic::load(&om_list_globals._in_use_count)); 2516 } 2517 2518 if (free_head_p != NULL) { 2519 // Move the deflated ObjectMonitors back to the global free list. 
2520 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); 2521 #ifdef ASSERT 2522 ObjectMonitor* l_next_om = free_tail_p->next_om(); 2523 #endif 2524 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2525 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); 2526 Atomic::add(&counters->n_scavenged, deflated_count); 2527 } 2528 timer.stop(); 2529 2530 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2531 LogStreamHandle(Info, monitorinflation) lsh_info; 2532 LogStream* ls = NULL; 2533 if (log_is_enabled(Debug, monitorinflation)) { 2534 ls = &lsh_debug; 2535 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2536 ls = &lsh_info; 2537 } 2538 if (ls != NULL) { 2539 ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 2540 } 2541 } 2542 2543 class HandshakeForDeflation : public HandshakeClosure { 2544 public: 2545 HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {} 2546 2547 void do_thread(Thread* thread) { 2548 log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread=" 2549 INTPTR_FORMAT, p2i(thread)); 2550 } 2551 }; 2552 2553 void ObjectSynchronizer::deflate_idle_monitors_using_JT() { 2554 assert(AsyncDeflateIdleMonitors, "sanity check"); 2555 2556 // Deflate any global idle monitors. 2557 deflate_global_idle_monitors_using_JT(); 2558 2559 int count = 0; 2560 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 2561 if (Atomic::load(&jt->om_in_use_count) > 0 && !jt->is_exiting()) { 2562 // This JavaThread is using ObjectMonitors so deflate any that 2563 // are idle unless this JavaThread is exiting; do not race with 2564 // ObjectSynchronizer::om_flush(). 2565 deflate_per_thread_idle_monitors_using_JT(jt); 2566 count++; 2567 } 2568 } 2569 if (count > 0) { 2570 log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count); 2571 } 2572 2573 log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, " 2574 "global_free_count=%d, global_wait_count=%d", 2575 Atomic::load(&om_list_globals._population), 2576 Atomic::load(&om_list_globals._in_use_count), 2577 Atomic::load(&om_list_globals._free_count), 2578 Atomic::load(&om_list_globals._wait_count)); 2579 2580 // The ServiceThread's async deflation request has been processed. 2581 set_is_async_deflation_requested(false); 2582 2583 if (Atomic::load(&om_list_globals._wait_count) > 0) { 2584 // There are deflated ObjectMonitors waiting for a handshake 2585 // (or a safepoint) for safety. 2586 2587 ObjectMonitor* list = Atomic::load(&om_list_globals._wait_list); 2588 ADIM_guarantee(list != NULL, "om_list_globals._wait_list must not be NULL"); 2589 int count = Atomic::load(&om_list_globals._wait_count); 2590 Atomic::store(&om_list_globals._wait_count, 0); 2591 OrderAccess::storestore(); // Make sure counter update is seen first. 2592 Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL); 2593 2594 // Find the tail for prepend_list_to_common(). No need to mark 2595 // ObjectMonitors for this list walk since only the deflater 2596 // thread manages the wait list. 
2597 int l_count = 0; 2598 ObjectMonitor* tail = NULL; 2599 for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) { 2600 tail = n; 2601 l_count++; 2602 } 2603 ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count); 2604 2605 // Will execute a safepoint if !ThreadLocalHandshakes: 2606 HandshakeForDeflation hfd_hc; 2607 Handshake::execute(&hfd_hc); 2608 2609 prepend_list_to_common(list, tail, count, &om_list_globals._free_list, 2610 &om_list_globals._free_count); 2611 2612 log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count); 2613 } 2614 } 2615 2616 // Deflate global idle ObjectMonitors using a JavaThread. 2617 // 2618 void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() { 2619 assert(AsyncDeflateIdleMonitors, "sanity check"); 2620 assert(Thread::current()->is_Java_thread(), "precondition"); 2621 JavaThread* self = JavaThread::current(); 2622 2623 deflate_common_idle_monitors_using_JT(true /* is_global */, self); 2624 } 2625 2626 // Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread. 2627 // 2628 void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) { 2629 assert(AsyncDeflateIdleMonitors, "sanity check"); 2630 assert(Thread::current()->is_Java_thread(), "precondition"); 2631 2632 deflate_common_idle_monitors_using_JT(false /* !is_global */, target); 2633 } 2634 2635 // Deflate global or per-thread idle ObjectMonitors using a JavaThread. 2636 // 2637 void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) { 2638 JavaThread* self = JavaThread::current(); 2639 2640 int deflated_count = 0; 2641 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged ObjectMonitors 2642 ObjectMonitor* free_tail_p = NULL; 2643 ObjectMonitor* saved_mid_in_use_p = NULL; 2644 elapsedTimer timer; 2645 2646 if (log_is_enabled(Info, monitorinflation)) { 2647 timer.start(); 2648 } 2649 2650 if (is_global) { 2651 OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&om_list_globals._in_use_count))); 2652 } else { 2653 OM_PERFDATA_OP(MonExtant, inc(Atomic::load(&target->om_in_use_count))); 2654 } 2655 2656 do { 2657 if (saved_mid_in_use_p != NULL) { 2658 // We looped around because deflate_monitor_list_using_JT() 2659 // detected a pending safepoint. Honoring the safepoint is good, 2660 // but as long as is_special_deflation_requested() is supported, 2661 // we can't safely restart using saved_mid_in_use_p. That saved 2662 // ObjectMonitor could have been deflated by safepoint based 2663 // deflation and would no longer be on the in-use list where we 2664 // originally found it. 2665 saved_mid_in_use_p = NULL; 2666 } 2667 int local_deflated_count; 2668 if (is_global) { 2669 local_deflated_count = 2670 deflate_monitor_list_using_JT(&om_list_globals._in_use_list, 2671 &om_list_globals._in_use_count, 2672 &free_head_p, &free_tail_p, 2673 &saved_mid_in_use_p); 2674 } else { 2675 local_deflated_count = 2676 deflate_monitor_list_using_JT(&target->om_in_use_list, 2677 &target->om_in_use_count, &free_head_p, 2678 &free_tail_p, &saved_mid_in_use_p); 2679 } 2680 deflated_count += local_deflated_count; 2681 2682 if (free_head_p != NULL) { 2683 // Move the deflated ObjectMonitors to the global free list. 
2684 guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count); 2685 // Note: The target thread can be doing an om_alloc() that 2686 // is trying to prepend an ObjectMonitor on its in-use list 2687 // at the same time that we have deflated the current in-use 2688 // list head and put it on the local free list. prepend_to_common() 2689 // will detect the race and retry which avoids list corruption, 2690 // but the next field in free_tail_p can flicker to marked 2691 // and then unmarked while prepend_to_common() is sorting it 2692 // all out. 2693 #ifdef ASSERT 2694 ObjectMonitor* l_next_om = unmarked_next(free_tail_p); 2695 #endif 2696 assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om)); 2697 2698 prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count); 2699 2700 OM_PERFDATA_OP(Deflations, inc(local_deflated_count)); 2701 } 2702 2703 if (saved_mid_in_use_p != NULL) { 2704 // deflate_monitor_list_using_JT() detected a safepoint starting. 2705 timer.stop(); 2706 { 2707 if (is_global) { 2708 log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint."); 2709 } else { 2710 log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target)); 2711 } 2712 assert(SafepointMechanism::should_block(self), "sanity check"); 2713 ThreadBlockInVM blocker(self); 2714 } 2715 // Prepare for another loop after the safepoint. 2716 free_head_p = NULL; 2717 free_tail_p = NULL; 2718 if (log_is_enabled(Info, monitorinflation)) { 2719 timer.start(); 2720 } 2721 } 2722 } while (saved_mid_in_use_p != NULL); 2723 timer.stop(); 2724 2725 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2726 LogStreamHandle(Info, monitorinflation) lsh_info; 2727 LogStream* ls = NULL; 2728 if (log_is_enabled(Debug, monitorinflation)) { 2729 ls = &lsh_debug; 2730 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2731 ls = &lsh_info; 2732 } 2733 if (ls != NULL) { 2734 if (is_global) { 2735 ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count); 2736 } else { 2737 ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count); 2738 } 2739 } 2740 } 2741 2742 void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) { 2743 // Report the cumulative time for deflating each thread's idle 2744 // monitors. Note: if the work is split among more than one 2745 // worker thread, then the reported time will likely be more 2746 // than a beginning to end measurement of the phase. 2747 log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged); 2748 2749 bool needs_special_deflation = is_special_deflation_requested(); 2750 if (AsyncDeflateIdleMonitors && !needs_special_deflation) { 2751 // Nothing to do when idle ObjectMonitors are deflated using 2752 // a JavaThread unless a special deflation has been requested. 2753 return; 2754 } 2755 2756 if (log_is_enabled(Debug, monitorinflation)) { 2757 // exit_globals()'s call to audit_and_print_stats() is done 2758 // at the Info level and not at a safepoint. 
    // For async deflation, audit_and_print_stats() is called in
    // ObjectSynchronizer::do_safepoint_work() at the Debug level
    // at a safepoint.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  } else if (log_is_enabled(Info, monitorinflation)) {
    log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, "
                               "global_free_count=%d, global_wait_count=%d",
                               Atomic::load(&om_list_globals._population),
                               Atomic::load(&om_list_globals._in_use_count),
                               Atomic::load(&om_list_globals._free_count),
                               Atomic::load(&om_list_globals._wait_count));
  }

  OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
  OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));

  GVars.stw_random = os::random();
  GVars.stw_cycle++;

  if (needs_special_deflation) {
    set_is_special_deflation_requested(false);  // special deflation is done
  }
}

void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) {
    // Nothing to do if a special deflation has NOT been requested.
    return;
  }

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, safepoint, cleanup) ||
      log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Update n_in_circulation before om_in_use_count is updated by deflation.
  Atomic::add(&counters->n_in_circulation, Atomic::load(&thread->om_in_use_count));

  int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p);
  Atomic::add(&counters->n_in_use, Atomic::load(&thread->om_in_use_count));

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
#ifdef ASSERT
    ObjectMonitor* l_next_om = free_tail_p->next_om();
#endif
    assert(l_next_om == NULL, "must be NULL: _next_om=" INTPTR_FORMAT, p2i(l_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
    Atomic::add(&counters->per_thread_scavenged, deflated_count);
  }

  timer.stop();
  counters->per_thread_times += timer.seconds();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count);
  }
}

// Monitor cleanup on JavaThread::exit

// Iterate through monitor cache and attempt to release thread's monitors.
// Gives up on a particular monitor if an exception occurs, but continues
// the overall iteration, swallowing the exception.
class ReleaseJavaMonitorsClosure: public MonitorClosure {
 private:
  TRAPS;

 public:
  ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
  void do_monitor(ObjectMonitor* mid) {
    if (mid->owner() == THREAD) {
      (void)mid->complete_exit(CHECK);
    }
  }
};
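
// A minimal sketch of the MonitorClosure pattern used above (the closure
// name and counting logic here are hypothetical; monitors_iterate() is the
// real iterator used below):
//
//   class CountBusyMonitorsClosure: public MonitorClosure {
//    public:
//     int _count;
//     CountBusyMonitorsClosure() : _count(0) {}
//     void do_monitor(ObjectMonitor* mid) {
//       if (mid->is_busy()) _count++;  // is_busy() is the real query
//     }
//   };
//
//   CountBusyMonitorsClosure closure;
//   ObjectSynchronizer::monitors_iterate(&closure);
//   log_info(monitorinflation)("busy monitors: %d", closure._count);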

// Release all inflated monitors owned by THREAD. Lightweight monitors are
// ignored. This is meant to be called during JNI thread detach which assumes
// all remaining monitors are heavyweight. All exceptions are swallowed.
// Scanning the extant monitor list can be time consuming.
// A simple optimization is to add a per-thread flag that indicates a thread
// called jni_monitorenter() during its lifetime.
//
// Instead of NoSafepointVerifier it might be cheaper to
// use an idiom of the form:
//   auto int tmp = SafepointSynchronize::_safepoint_counter ;
//   <code that must not run at safepoint>
//   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
// Since the tests are extremely cheap we could leave them enabled
// for normal product builds.
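//
// Spelled out as a compilable sketch (variable name hypothetical, assuming
// a safepoint_counter() accessor for _safepoint_counter): the xor term
// verifies the counter did not change, and the low-bit term verifies the
// counter was even at the start, since an odd value means a safepoint is
// in progress:
//
//   uint64_t start_counter = SafepointSynchronize::safepoint_counter();
//   // ... code that must not run across a safepoint ...
//   guarantee(((start_counter ^ SafepointSynchronize::safepoint_counter()) |
//              (start_counter & 1)) == 0, "code ran across a safepoint");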

void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
  assert(THREAD == JavaThread::current(), "must be current Java thread");
  NoSafepointVerifier nsv;
  ReleaseJavaMonitorsClosure rjmc(THREAD);
  ObjectSynchronizer::monitors_iterate(&rjmc);
  THREAD->clear_pending_exception();
}

const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
  switch (cause) {
    case inflate_cause_vm_internal:    return "VM Internal";
    case inflate_cause_monitor_enter:  return "Monitor Enter";
    case inflate_cause_wait:           return "Monitor Wait";
    case inflate_cause_notify:         return "Monitor Notify";
    case inflate_cause_hash_code:      return "Monitor Hash Code";
    case inflate_cause_jni_enter:      return "JNI Monitor Enter";
    case inflate_cause_jni_exit:       return "JNI Monitor Exit";
    default:
      ShouldNotReachHere();
  }
  return "Unknown";
}

//------------------------------------------------------------------------------
// Debugging code

u_char* ObjectSynchronizer::get_gvars_addr() {
  return (u_char*)&GVars;
}

u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
  return (u_char*)&GVars.hc_sequence;
}

size_t ObjectSynchronizer::get_gvars_size() {
  return sizeof(SharedGlobals);
}

u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
  return (u_char*)&GVars.stw_random;
}

// This function can be called at a safepoint or it can be called when
// we are trying to exit the VM. When we are trying to exit the VM, the
// list walker functions can run in parallel with the other list
// operations so spin-locking is used for safety.
//
// Calls to this function can be added in various places as a debugging
// aid; pass 'true' for the 'on_exit' parameter to have in-use monitor
// details logged at the Info level and 'false' for the 'on_exit'
// parameter to have in-use monitor details logged at the Trace level.
// deflate_monitor_list() no longer uses spin-locking so be careful
// when adding audit_and_print_stats() calls at a safepoint.
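//
// For example, a temporary debugging call site might look like this
// (placement is illustrative only; this is not a call site in this file):
//
//   if (log_is_enabled(Trace, monitorinflation)) {
//     // In-use monitor details will be logged at the Trace level:
//     ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
//   }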
//
void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
  assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStreamHandle(Trace, monitorinflation) lsh_trace;
  LogStream* ls = NULL;
  if (log_is_enabled(Trace, monitorinflation)) {
    ls = &lsh_trace;
  } else if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  assert(ls != NULL, "sanity check");

  // Log counts for the global and per-thread monitor lists:
  int chk_om_population = log_monitor_list_counts(ls);
  int error_cnt = 0;

  ls->print_cr("Checking global lists:");

  // Check om_list_globals._population:
  if (Atomic::load(&om_list_globals._population) == chk_om_population) {
    ls->print_cr("global_population=%d equals chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  } else {
    // With fine grained locks on the monitor lists, it is possible for
    // log_monitor_list_counts() to return a value that doesn't match
    // om_list_globals._population. So far a higher value has been
    // seen in testing so something is being double counted by
    // log_monitor_list_counts().
    ls->print_cr("WARNING: global_population=%d is not equal to "
                 "chk_om_population=%d",
                 Atomic::load(&om_list_globals._population), chk_om_population);
  }

  // Check om_list_globals._in_use_list and om_list_globals._in_use_count:
  chk_global_in_use_list_and_count(ls, &error_cnt);

  // Check om_list_globals._free_list and om_list_globals._free_count:
  chk_global_free_list_and_count(ls, &error_cnt);

  // Check om_list_globals._wait_list and om_list_globals._wait_count:
  chk_global_wait_list_and_count(ls, &error_cnt);

  ls->print_cr("Checking per-thread lists:");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    // Check om_in_use_list and om_in_use_count:
    chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt);

    // Check om_free_list and om_free_count:
    chk_per_thread_free_list_and_count(jt, ls, &error_cnt);
  }

  if (error_cnt == 0) {
    ls->print_cr("No errors found in monitor list checks.");
  } else {
    log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt);
  }

  if ((on_exit && log_is_enabled(Info, monitorinflation)) ||
      (!on_exit && log_is_enabled(Trace, monitorinflation))) {
    // When exiting this log output is at the Info level. When called
    // at a safepoint, this log output is at the Trace level since
    // there can be a lot of it.
    log_in_use_monitor_details(ls);
  }

  ls->flush();

  guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt);
}

// Check a free monitor entry; log any errors.
void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n,
                                        outputStream * out, int *error_cnt_p) {
  stringStream ss;
  if (n->is_busy()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must not be busy: %s", p2i(jt),
                    p2i(n), n->is_busy_to_string(&ss));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->header().value() != 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _header "
                    "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    } else if (!AsyncDeflateIdleMonitors) {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _header field: _header=" INTPTR_FORMAT,
                    p2i(n), n->header().value());
      *error_cnt_p = *error_cnt_p + 1;
    }
  }
  if (n->object() != NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": free per-thread monitor must have NULL _object "
                    "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n),
                    p2i(n->object()));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor "
                    "must have NULL _object field: _object=" INTPTR_FORMAT,
                    p2i(n), p2i(n->object()));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Lock the next ObjectMonitor for traversal and unlock the current
// ObjectMonitor. Returns the next ObjectMonitor if there is one.
// Otherwise returns NULL (after unlocking the current ObjectMonitor).
// This function is used by the various list walker functions to
// safely walk a list without allowing an ObjectMonitor to be moved
// to another list in the middle of a walk.
static ObjectMonitor* lock_next_for_traversal(ObjectMonitor* cur) {
  assert(is_locked(cur), "cur=" INTPTR_FORMAT " must be locked", p2i(cur));
  ObjectMonitor* next = unmarked_next(cur);
  if (next == NULL) {  // Reached the end of the list.
    om_unlock(cur);
    return NULL;
  }
  om_lock(next);   // Lock next before unlocking current to keep
  om_unlock(cur);  // from being by-passed by another thread.
  return next;
}
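
// The canonical hand-over-hand walk built on the helper above looks like
// this (a sketch of the pattern the checkers below follow; 'visit' stands
// in for whatever per-entry work a walker does):
//
//   ObjectMonitor* cur = get_list_head_locked(&some_list);
//   while (cur != NULL) {
//     visit(cur);                          // cur is locked here
//     cur = lock_next_for_traversal(cur);  // locks next, then unlocks cur
//   }
//
// Because the next entry is locked before the current one is released,
// a concurrent thread can never unlink an entry out from under the walker.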

// Check the global free list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._free_list)) != NULL) {
    // Marked the global free list head so process the list.
    while (true) {
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  if (l_free_count == chk_om_free_count) {
    out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
                  l_free_count, chk_om_free_count);
  } else {
    // With fine grained locks on om_list_globals._free_list, it
    // is possible for an ObjectMonitor to be prepended to
    // om_list_globals._free_list after we started calculating
    // chk_om_free_count so om_list_globals._free_count may not
    // match anymore.
    out->print_cr("WARNING: global_free_count=%d is not equal to "
                  "chk_om_free_count=%d", l_free_count, chk_om_free_count);
  }
}

// Check the global wait list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
                                                        int *error_cnt_p) {
  int chk_om_wait_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._wait_list)) != NULL) {
    // Marked the global wait list head so process the list.
    while (true) {
      // Rules for om_list_globals._wait_list are the same as for
      // om_list_globals._free_list:
      chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_wait_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  if (Atomic::load(&om_list_globals._wait_count) == chk_om_wait_count) {
    out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
  } else {
    out->print_cr("ERROR: global_wait_count=%d is not equal to "
                  "chk_om_wait_count=%d",
                  Atomic::load(&om_list_globals._wait_count), chk_om_wait_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the global in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
                                                          int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
    // Marked the global in-use list head so process the list.
    while (true) {
      chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  if (l_in_use_count == chk_om_in_use_count) {
    out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  } else {
    // With fine grained locks on the monitor lists, it is possible for
    // an exiting JavaThread to put its in-use ObjectMonitors on the
    // global in-use list after chk_om_in_use_count is calculated above.
    out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
                  l_in_use_count, chk_om_in_use_count);
  }
}

// Check an in-use monitor entry; log any errors.
void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n,
                                          outputStream * out, int *error_cnt_p) {
  if (n->header().value() == 0) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _header "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _header field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  if (n->object() == NULL) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor must have non-NULL _object "
                    "field.", p2i(jt), p2i(n));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor "
                    "must have non-NULL _object field.", p2i(n));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  const oop obj = (oop)n->object();
  const markWord mark = obj->mark();
  if (!mark.has_monitor()) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not think "
                    "it has a monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value());
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not think it has a monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n),
                    p2i(obj), mark.value());
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
  ObjectMonitor* const obj_mon = mark.monitor();
  if (n != obj_mon) {
    if (jt != NULL) {
      out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT
                    ": in-use per-thread monitor's object does not refer "
                    "to the same monitor: obj=" INTPTR_FORMAT ", mark="
                    INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt),
                    p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    } else {
      out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global "
                    "monitor's object does not refer to the same monitor: obj="
                    INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon="
                    INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon));
    }
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's free list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt,
                                                            outputStream * out,
                                                            int *error_cnt_p) {
  int chk_om_free_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) {
    // Marked the per-thread free list head so process the list.
    while (true) {
      chk_free_entry(jt, cur, out, error_cnt_p);
      chk_om_free_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_free_count = Atomic::load(&jt->om_free_count);
  if (l_om_free_count == chk_om_free_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals "
                  "chk_om_free_count=%d", p2i(jt), l_om_free_count, chk_om_free_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not "
                  "equal to chk_om_free_count=%d", p2i(jt), l_om_free_count,
                  chk_om_free_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Check the thread's in-use list and count; log the results of the checks.
void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt,
                                                              outputStream * out,
                                                              int *error_cnt_p) {
  int chk_om_in_use_count = 0;
  ObjectMonitor* cur = NULL;
  if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
    // Marked the per-thread in-use list head so process the list.
    while (true) {
      chk_in_use_entry(jt, cur, out, error_cnt_p);
      chk_om_in_use_count++;

      cur = lock_next_for_traversal(cur);
      if (cur == NULL) {
        break;
      }
    }
  }
  int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
  if (l_om_in_use_count == chk_om_in_use_count) {
    out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals "
                  "chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
  } else {
    out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not "
                  "equal to chk_om_in_use_count=%d", p2i(jt), l_om_in_use_count,
                  chk_om_in_use_count);
    *error_cnt_p = *error_cnt_p + 1;
  }
}

// Log details about ObjectMonitors on the in-use lists. The 'BHL'
// flags indicate why the entry is in-use, 'object' and 'object type'
// indicate the associated object and its type.
void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) {
  stringStream ss;
  if (Atomic::load(&om_list_globals._in_use_count) > 0) {
    out->print_cr("In-use global monitor info:");
    out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
    out->print_cr("%18s %s %18s %18s",
                  "monitor", "BHL", "object", "object type");
    out->print_cr("================== === ================== ==================");
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&om_list_globals._in_use_list)) != NULL) {
      // Marked the global in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT " %s", p2i(cur),
                   cur->is_busy() != 0, mark.hash() != 0, cur->owner() != NULL,
                   p2i(obj), obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->print_cr("In-use per-thread monitor info:");
  out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
  out->print_cr("%18s %18s %s %18s %18s",
                "jt", "monitor", "BHL", "object", "object type");
  out->print_cr("================== ================== === ================== ==================");
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    ObjectMonitor* cur = NULL;
    if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
      // Marked the per-thread in-use list head so process the list.
      while (true) {
        const oop obj = (oop) cur->object();
        const markWord mark = cur->header();
        ResourceMark rm;
        out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d " INTPTR_FORMAT
                   " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
                   mark.hash() != 0, cur->owner() != NULL, p2i(obj),
                   obj->klass()->external_name());
        if (cur->is_busy() != 0) {
          out->print(" (%s)", cur->is_busy_to_string(&ss));
          ss.reset();
        }
        out->cr();

        cur = lock_next_for_traversal(cur);
        if (cur == NULL) {
          break;
        }
      }
    }
  }

  out->flush();
}

// Log counts for the global and per-thread monitor lists and return
// the population count.
int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
  int pop_count = 0;
  out->print_cr("%18s %10s %10s %10s %10s",
                "Global Lists:", "InUse", "Free", "Wait", "Total");
  out->print_cr("================== ========== ========== ========== ==========");
  int l_in_use_count = Atomic::load(&om_list_globals._in_use_count);
  int l_free_count = Atomic::load(&om_list_globals._free_count);
  int l_wait_count = Atomic::load(&om_list_globals._wait_count);
  out->print_cr("%18s %10d %10d %10d %10d", "", l_in_use_count,
                l_free_count, l_wait_count,
                Atomic::load(&om_list_globals._population));
  pop_count += l_in_use_count + l_free_count + l_wait_count;

  out->print_cr("%18s %10s %10s %10s",
                "Per-Thread Lists:", "InUse", "Free", "Provision");
  out->print_cr("================== ========== ========== ==========");

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
    int l_om_in_use_count = Atomic::load(&jt->om_in_use_count);
    int l_om_free_count = Atomic::load(&jt->om_free_count);
    out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
                  l_om_in_use_count, l_om_free_count, jt->om_free_provision);
    pop_count += l_om_in_use_count + l_om_free_count;
  }
  return pop_count;
}

#ifndef PRODUCT

// Check if monitor belongs to the monitor cache.
// The list is grow-only so it's *relatively* safe to traverse
// the list of extant blocks without taking a lock.

int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
      address mon = (address)monitor;
      address blk = (address)block;
      size_t diff = mon - blk;
      assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
      return 1;
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // used with block linkage _next_om fields).
    block = (PaddedObjectMonitor*)block->next_om();
  }
  return 0;
}

#endif
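
// A hypothetical non-product call site sketch for the helper above
// (illustrative only; no such call exists in this section):
//
//   #ifndef PRODUCT
//   assert(ObjectSynchronizer::verify_objmon_isinpool(mid),
//          "monitor must come from a g_block_list block");
//   #endif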