/*
 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/preserveException.hpp"

// The "core" versions of monitor enter and exit reside in this file.
// The interpreter and compilers contain specialized transliterated
// variants of the enter-exit fast-path operations. See i486.ad fast_lock(),
// for instance. If you make changes here, make sure to modify the
// interpreter, and both C1 and C2 fast-path inline locking code emission.
//
// -----------------------------------------------------------------------------

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available
// TODO-FIXME: probes should not fire when caller is _blocked. assert() accordingly.
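// For illustration (a descriptive note, not part of the original probe
// plumbing): with the macros defined below, a call like
//   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
// token-pastes into HOTSPOT_MONITOR_PROBE_waited, i.e. the
// HOTSPOT_MONITOR_WAITED probe, after DTRACE_MONITOR_PROBE_COMMON has
// gathered the java thread id and the klass name bytes for the payload.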
#define DTRACE_MONITOR_PROBE_COMMON(obj, thread)                           \
  char* bytes = NULL;                                                      \
  int len = 0;                                                             \
  jlong jtid = SharedRuntime::get_java_tid(thread);                        \
  Symbol* klassname = ((oop)(obj))->klass()->name();                       \
  if (klassname != NULL) {                                                 \
    bytes = (char*)klassname->bytes();                                     \
    len = klassname->utf8_length();                                        \
  }

#define DTRACE_MONITOR_WAIT_PROBE(monitor, obj, thread, millis)            \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_WAIT(jtid,                                           \
                           (uintptr_t)(monitor), bytes, len, (millis));    \
    }                                                                      \
  }

#define HOTSPOT_MONITOR_PROBE_notify HOTSPOT_MONITOR_NOTIFY
#define HOTSPOT_MONITOR_PROBE_notifyAll HOTSPOT_MONITOR_NOTIFYALL
#define HOTSPOT_MONITOR_PROBE_waited HOTSPOT_MONITOR_WAITED

#define DTRACE_MONITOR_PROBE(probe, monitor, obj, thread)                  \
  {                                                                        \
    if (DTraceMonitorProbes) {                                             \
      DTRACE_MONITOR_PROBE_COMMON(obj, thread);                            \
      HOTSPOT_MONITOR_PROBE_##probe(jtid, /* probe = waited */             \
                                    (uintptr_t)(monitor), bytes, len);     \
    }                                                                      \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
#define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}

#endif // ndef DTRACE_ENABLED

// This exists only as a workaround of dtrace bug 6254741
int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
  DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
  return 0;
}

#define NINFLATIONLOCKS 256
static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];

// global list of blocks of monitors
PaddedObjectMonitor* ObjectSynchronizer::g_block_list = NULL;
bool volatile ObjectSynchronizer::_is_async_deflation_requested = false;
bool volatile ObjectSynchronizer::_is_special_deflation_requested = false;
jlong ObjectSynchronizer::_last_async_deflation_time_ns = 0;

struct ListGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared list related variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.

  // Global ObjectMonitor free list. Newly allocated and deflated
  // ObjectMonitors are prepended here.
  ObjectMonitor* free_list;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor in-use list. When a JavaThread is exiting,
  // ObjectMonitors on its per-thread in-use list are prepended here.
  ObjectMonitor* in_use_list;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  // Global ObjectMonitor wait list. If HandshakeAfterDeflateIdleMonitors
  // is true, deflated ObjectMonitors wait on this list until after a
  // handshake or a safepoint for platforms that don't support handshakes.
  // After the handshake or safepoint, the deflated ObjectMonitors are
  // prepended to free_list.
  ObjectMonitor* wait_list;
  DEFINE_PAD_MINUS_SIZE(3, OM_CACHE_LINE_SIZE, sizeof(ObjectMonitor*));

  int free_count;    // # on free_list
  DEFINE_PAD_MINUS_SIZE(4, OM_CACHE_LINE_SIZE, sizeof(int));

  int in_use_count;  // # on in_use_list
  DEFINE_PAD_MINUS_SIZE(5, OM_CACHE_LINE_SIZE, sizeof(int));

  int population;    // # Extant -- in circulation
  DEFINE_PAD_MINUS_SIZE(6, OM_CACHE_LINE_SIZE, sizeof(int));

  int wait_count;    // # on wait_list
  DEFINE_PAD_MINUS_SIZE(7, OM_CACHE_LINE_SIZE, sizeof(int));
};
static ListGlobals LVars;

#define CHAINMARKER (cast_to_oop<intptr_t>(-1))


// =====================> Spinlock functions

// ObjectMonitors are not lockable outside of this file. We use spinlocks
// implemented using a bit in the _next_om field instead of the heavier
// weight locking mechanisms for faster list management.

#define OM_LOCK_BIT 0x1

// Return true if the ObjectMonitor is locked.
// Otherwise returns false.
static bool is_locked(ObjectMonitor* om) {
  return ((intptr_t)Atomic::load(&om->_next_om) & OM_LOCK_BIT) == OM_LOCK_BIT;
}

// Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
// Note: the om parameter may or may not have been marked originally.
static ObjectMonitor* mark_om_ptr(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)om | OM_LOCK_BIT);
}

// Try to lock an ObjectMonitor. Returns true if locking was successful.
// Otherwise returns false.
static bool try_om_lock(ObjectMonitor* om) {
  // Get current next field without any OM_LOCK_BIT value.
  ObjectMonitor* next = (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
  if (Atomic::cmpxchg(&om->_next_om, next, mark_om_ptr(next)) != next) {
    return false;  // Cannot lock the ObjectMonitor.
  }
  return true;
}

// Lock an ObjectMonitor.
static void om_lock(ObjectMonitor* om) {
  while (true) {
    if (try_om_lock(om)) {
      return;
    }
  }
}

// Unlock an ObjectMonitor.
static void om_unlock(ObjectMonitor* om) {
  ObjectMonitor* next = Atomic::load(&om->_next_om);
  guarantee(((intptr_t)next & OM_LOCK_BIT) == OM_LOCK_BIT, "next=" INTPTR_FORMAT
            " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);

  next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT);  // Clear OM_LOCK_BIT.
  Atomic::store(&om->_next_om, next);
}

// Get the list head after locking it. Returns the list head or NULL
// if the list is empty.
static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
  while (true) {
    ObjectMonitor* mid = Atomic::load(list_p);
    if (mid == NULL) {
      return NULL;  // The list is empty.
    }
    if (try_om_lock(mid)) {
      if (Atomic::load(list_p) != mid) {
        // The list head changed so we have to retry.
        om_unlock(mid);
        continue;
      }
      return mid;
    }
  }
}

// Return the unmarked next field in an ObjectMonitor. Note: the next
// field may or may not have been marked with OM_LOCK_BIT originally.
static ObjectMonitor* unmarked_next(ObjectMonitor* om) {
  return (ObjectMonitor*)((intptr_t)Atomic::load(&om->_next_om) & ~OM_LOCK_BIT);
}

#undef OM_LOCK_BIT
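// A minimal sketch of the pointer-tagging scheme above (illustrative only;
// 'raw', 'tagged' and 'clean' are hypothetical local names). ObjectMonitors
// are cache-line aligned, so the low bit of a _next_om pointer is always
// zero and can double as the spinlock bit:
//
//   intptr_t raw    = (intptr_t)om;             // low bit clear, e.g. ...0
//   intptr_t tagged = raw | 0x1;                // locked form,   e.g. ...1
//   intptr_t clean  = tagged & ~(intptr_t)0x1;  // unlocked form, e.g. ...0
//
// try_om_lock() publishes the tagged value with Atomic::cmpxchg() so only
// one thread at a time can hold the "lock" on a given list node.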


// =====================> List Management functions

// Set the next field in an ObjectMonitor to the specified value.
static void set_next(ObjectMonitor* om, ObjectMonitor* value) {
  Atomic::store(&om->_next_om, value);
}

// Prepend a list of ObjectMonitors to the specified *list_p. 'tail' is
// the last ObjectMonitor in the list and there are 'count' on the list.
// Also updates the specified *count_p.
static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                   int count, ObjectMonitor** list_p,
                                   int* count_p) {
  while (true) {
    ObjectMonitor* cur = Atomic::load(list_p);
    // Prepend list to *list_p.
    if (!try_om_lock(tail)) {
      continue;  // failed to lock tail so try it all again
    }
    set_next(tail, cur);  // tail now points to cur (and unlocks tail)
    if (cur == NULL) {
      // No potential race with takers or other prependers since
      // *list_p is empty.
      if (Atomic::cmpxchg(list_p, cur, list) == cur) {
        // Successfully switched *list_p to the list value.
        Atomic::add(count_p, count);
        break;
      }
      // Implied else: try it all again
    } else {
      if (!try_om_lock(cur)) {
        continue;  // failed to lock cur so try it all again
      }
      // We locked cur so try to switch *list_p to the list value.
      if (Atomic::cmpxchg(list_p, cur, list) != cur) {
        // The list head has changed so unlock cur and try again:
        om_unlock(cur);
        continue;
      }
      Atomic::add(count_p, count);
      om_unlock(cur);
      break;
    }
  }
}

// Prepend a newly allocated block of ObjectMonitors to g_block_list and
// LVars.free_list. Also updates LVars.population and LVars.free_count.
void ObjectSynchronizer::prepend_block_to_lists(PaddedObjectMonitor* new_blk) {
  // First we handle g_block_list:
  while (true) {
    PaddedObjectMonitor* cur = Atomic::load(&g_block_list);
    // Prepend new_blk to g_block_list. The first ObjectMonitor in
    // a block is reserved for use as linkage to the next block.
    new_blk[0]._next_om = cur;
    if (Atomic::cmpxchg(&g_block_list, cur, new_blk) == cur) {
      // Successfully switched g_block_list to the new_blk value.
      Atomic::add(&LVars.population, _BLOCKSIZE - 1);
      break;
    }
    // Implied else: try it all again
  }

  // Second we handle LVars.free_list:
  prepend_list_to_common(new_blk + 1, &new_blk[_BLOCKSIZE - 1], _BLOCKSIZE - 1,
                         &LVars.free_list, &LVars.free_count);
}

// Prepend a list of ObjectMonitors to LVars.free_list. 'tail' is the last
// ObjectMonitor in the list and there are 'count' on the list. Also
// updates LVars.free_count.
static void prepend_list_to_global_free_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);
}

// Prepend a list of ObjectMonitors to LVars.wait_list. 'tail' is the last
// ObjectMonitor in the list and there are 'count' on the list. Also
// updates LVars.wait_count.
static void prepend_list_to_global_wait_list(ObjectMonitor* list,
                                             ObjectMonitor* tail, int count) {
  assert(HandshakeAfterDeflateIdleMonitors, "sanity check");
  prepend_list_to_common(list, tail, count, &LVars.wait_list, &LVars.wait_count);
}
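// An illustrative walk-through of prepend_list_to_common() (the node names
// a, b and c are hypothetical): to prepend the list a -> b -> c onto *list_p,
// the tail c is locked first, then set_next(c, cur) points c at the current
// head, which also clears c's lock bit. If the head was non-NULL it is
// locked as well before the cmpxchg() swings *list_p to a, so a concurrent
// taker cannot unlink the old head in the middle of the splice (this is the
// A-B-A guard mentioned in prepend_to_common() below).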
// Prepend a list of ObjectMonitors to LVars.in_use_list. 'tail' is the last
// ObjectMonitor in the list and there are 'count' on the list. Also
// updates LVars.in_use_count.
static void prepend_list_to_global_in_use_list(ObjectMonitor* list,
                                               ObjectMonitor* tail, int count) {
  prepend_list_to_common(list, tail, count, &LVars.in_use_list, &LVars.in_use_count);
}

// Prepend an ObjectMonitor to the specified list. Also updates
// the specified counter.
static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
                              int* count_p) {
  while (true) {
    om_lock(m);  // Lock m so we can safely update its next field.
    ObjectMonitor* cur = NULL;
    // Lock the list head to guard against A-B-A race:
    if ((cur = get_list_head_locked(list_p)) != NULL) {
      // List head is now locked so we can safely switch it.
      set_next(m, cur);  // m now points to cur (and unlocks m)
      Atomic::store(list_p, m);  // Switch list head to unlocked m.
      om_unlock(cur);
      break;
    }
    // The list is empty so try to set the list head.
    assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
    set_next(m, cur);  // m now points to NULL (and unlocks m)
    if (Atomic::cmpxchg(list_p, cur, m) == cur) {
      // List head is now unlocked m.
      break;
    }
    // Implied else: try it all again
  }
  Atomic::inc(count_p);
}

// Prepend an ObjectMonitor to a per-thread om_free_list.
// Also updates the per-thread om_free_count.
static void prepend_to_om_free_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_free_list, &self->om_free_count);
}

// Prepend an ObjectMonitor to a per-thread om_in_use_list.
// Also updates the per-thread om_in_use_count.
static void prepend_to_om_in_use_list(Thread* self, ObjectMonitor* m) {
  prepend_to_common(m, &self->om_in_use_list, &self->om_in_use_count);
}

// Take an ObjectMonitor from the start of the specified list. Also
// decrements the specified counter. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
                                                int* count_p) {
  ObjectMonitor* take = NULL;
  // Lock the list head to guard against A-B-A race:
  if ((take = get_list_head_locked(list_p)) == NULL) {
    return NULL;  // None are available.
  }
  ObjectMonitor* next = unmarked_next(take);
  // Switch locked list head to next (which unlocks the list head, but
  // leaves take locked):
  Atomic::store(list_p, next);
  Atomic::dec(count_p);
  // Unlock take, but leave the next value for any lagging list
  // walkers. It will get cleaned up when take is prepended to
  // the in-use list:
  om_unlock(take);
  return take;
}

// Take an ObjectMonitor from the start of the LVars.free_list. Also
// updates LVars.free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_global_free_list() {
  return take_from_start_of_common(&LVars.free_list, &LVars.free_count);
}

// Take an ObjectMonitor from the start of a per-thread free-list.
// Also updates om_free_count. Returns NULL if none are available.
static ObjectMonitor* take_from_start_of_om_free_list(Thread* self) {
  return take_from_start_of_common(&self->om_free_list, &self->om_free_count);
}
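// Illustrative pairing of the take/prepend helpers above (this mirrors what
// om_alloc() later in this file does on its fast path):
//
//   ObjectMonitor* m = take_from_start_of_om_free_list(self);  // thread-local
//   if (m != NULL) {
//     prepend_to_om_in_use_list(self, m);  // publish on this thread's in-use list
//   }
//
// Each list keeps its own counter (om_free_count, om_in_use_count, etc.) so
// deflation and auditing code can cross-check list lengths cheaply.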


// =====================> Quick functions

// The quick_* forms are special fast-path variants used to improve
// performance. In the simplest case, a "quick_*" implementation could
// simply return false, in which case the caller will perform the necessary
// state transitions and call the slow-path form.
// The fast-path is designed to handle frequently arising cases in an efficient
// manner and is just a degenerate "optimistic" variant of the slow-path.
// returns true  -- to indicate the call was satisfied.
// returns false -- to indicate the call needs the services of the slow-path.
// A no-loitering ordinance is in effect for code in the quick_* family
// operators: safepoints or indefinite blocking (blocking that might span a
// safepoint) are forbidden. Generally the thread_state() is _in_Java upon
// entry.
//
// Consider: An interesting optimization is to have the JIT recognize the
// following common idiom:
//   synchronized (someobj) { .... ; notify(); }
// That is, we find a notify() or notifyAll() call that immediately precedes
// the monitorexit operation. In that case the JIT could fuse the operations
// into a single notifyAndExit() runtime primitive.

bool ObjectSynchronizer::quick_notify(oopDesc* obj, Thread* self, bool all) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // slow-path for invalid obj
  const markWord mark = obj->mark();

  if (mark.has_locker() && self->is_lock_owned((address)mark.locker())) {
    // Degenerate notify
    // stack-locked by caller so by definition the implied waitset is empty.
    return true;
  }

  if (mark.has_monitor()) {
    ObjectMonitor* const mon = mark.monitor();
    assert(mon->object() == obj, "invariant");
    if (mon->owner() != self) return false;  // slow-path for IMS exception

    if (mon->first_waiter() != NULL) {
      // We have one or more waiters. Since this is an inflated monitor
      // that we own, we can transfer one or more threads from the waitset
      // to the entrylist here and now, avoiding the slow-path.
      if (all) {
        DTRACE_MONITOR_PROBE(notifyAll, mon, obj, self);
      } else {
        DTRACE_MONITOR_PROBE(notify, mon, obj, self);
      }
      int free_count = 0;
      do {
        mon->INotify(self);
        ++free_count;
      } while (mon->first_waiter() != NULL && all);
      OM_PERFDATA_OP(Notifications, inc(free_count));
    }
    return true;
  }

  // biased locking and any other IMS exception states take the slow-path
  return false;
}


// The LockNode emitted directly at the synchronization site would have
// been too big if it were to have included support for the cases of inflated
// recursive enter and exit, so they go here instead.
// Note that we can't safely call AsyncPrintJavaStack() from within
// quick_enter() as our thread state remains _in_Java.

bool ObjectSynchronizer::quick_enter(oop obj, Thread* self,
                                     BasicLock * lock) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->is_Java_thread(), "invariant");
  assert(((JavaThread *) self)->thread_state() == _thread_in_Java, "invariant");
  NoSafepointVerifier nsv;
  if (obj == NULL) return false;  // Need to throw NPE

  while (true) {
    const markWord mark = obj->mark();

    if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* const m = omh.om_ptr();
      assert(m->object() == obj, "invariant");
      Thread* const owner = (Thread *) m->_owner;

      // Lock contention and Transactional Lock Elision (TLE) diagnostics
      // and observability
      // Case: light contention possibly amenable to TLE
      // Case: TLE inimical operations such as nested/recursive synchronization

      if (owner == self) {
        m->_recursions++;
        return true;
      }

      // This Java Monitor is inflated so obj's header will never be
      // displaced to this thread's BasicLock. Make the displaced header
      // non-NULL so this BasicLock is not seen as recursive nor as
      // being locked. We do this unconditionally so that this thread's
      // BasicLock cannot be mis-interpreted by any stack walkers. For
      // performance reasons, stack walkers generally first check for
      // Biased Locking in the object's header, the second check is for
      // stack-locking in the object's header, the third check is for
      // recursive stack-locking in the displaced header in the BasicLock,
      // and last are the inflated Java Monitor (ObjectMonitor) checks.
      lock->set_displaced_header(markWord::unused_mark());

      if (owner == NULL && m->try_set_owner_from(self, NULL) == NULL) {
        assert(m->_recursions == 0, "invariant");
        return true;
      }

      if (AsyncDeflateIdleMonitors &&
          m->try_set_owner_from(self, DEFLATER_MARKER) == DEFLATER_MARKER) {
        // The deflation protocol finished the first part (setting owner),
        // but it failed the second part (making ref_count negative) and
        // bailed. Or the ObjectMonitor was async deflated and reused.
        // Acquired the monitor.
        assert(m->_recursions == 0, "invariant");
        return true;
      }
    }
    break;
  }

  // Note that we could inflate in quick_enter.
  // This is likely a useful optimization
  // Critically, in quick_enter() we must not:
  // -- perform bias revocation, or
  // -- block indefinitely, or
  // -- reach a safepoint

  return false;        // revert to slow-path
}

// -----------------------------------------------------------------------------
// Monitor Enter/Exit
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.

void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, TRAPS) {
  if (UseBiasedLocking) {
    if (!SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke(obj, THREAD);
    } else {
      BiasedLocking::revoke_at_safepoint(obj);
    }
  }

  markWord mark = obj->mark();
  assert(!mark.has_bias_pattern(), "should not see bias pattern here");

  if (mark.is_neutral()) {
    // Anticipate successful CAS -- the ST of the displaced mark must
    // be visible <= the ST performed by the CAS.
    lock->set_displaced_header(mark);
    if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
      return;
    }
    // Fall through to inflate() ...
  } else if (mark.has_locker() &&
             THREAD->is_lock_owned((address)mark.locker())) {
    assert(lock != mark.locker(), "must not re-lock the same lock");
    assert(lock != (BasicLock*)obj->mark().value(), "don't relock with same BasicLock");
    lock->set_displaced_header(markWord::from_pointer(NULL));
    return;
  }

  // The object header will never be displaced to this lock,
  // so it does not matter what the value is, except that it
  // must be non-zero to avoid looking like a re-entrant lock,
  // and must not look locked either.
  lock->set_displaced_header(markWord::unused_mark());
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_monitor_enter);
  omh.om_ptr()->enter(THREAD);
}

void ObjectSynchronizer::exit(oop object, BasicLock* lock, TRAPS) {
  markWord mark = object->mark();
  // We cannot check for Biased Locking if we are racing an inflation.
  assert(mark == markWord::INFLATING() ||
         !mark.has_bias_pattern(), "should not see bias pattern here");

  markWord dhw = lock->displaced_header();
  if (dhw.value() == 0) {
    // If the displaced header is NULL, then this exit matches up with
    // a recursive enter. No real work to do here except for diagnostics.
#ifndef PRODUCT
    if (mark != markWord::INFLATING()) {
      // Only do diagnostics if we are not racing an inflation. Simply
      // exiting a recursive enter of a Java Monitor that is being
      // inflated is safe; see the has_monitor() comment below.
      assert(!mark.is_neutral(), "invariant");
      assert(!mark.has_locker() ||
             THREAD->is_lock_owned((address)mark.locker()), "invariant");
      if (mark.has_monitor()) {
        // The BasicLock's displaced_header is marked as a recursive
        // enter and we have an inflated Java Monitor (ObjectMonitor).
        // This is a special case where the Java Monitor was inflated
        // after this thread entered the stack-lock recursively. When a
        // Java Monitor is inflated, we cannot safely walk the Java
        // Monitor owner's stack and update the BasicLocks because a
        // Java Monitor can be asynchronously inflated by a thread that
        // does not own the Java Monitor.
        ObjectMonitor* m = mark.monitor();
        assert(((oop)(m->object()))->mark() == mark, "invariant");
        assert(m->is_entered(THREAD), "invariant");
      }
    }
#endif
    return;
  }

  if (mark == markWord::from_pointer(lock)) {
    // If the object is stack-locked by the current thread, try to
    // swing the displaced header from the BasicLock back to the mark.
    assert(dhw.is_neutral(), "invariant");
    if (object->cas_set_mark(dhw, mark) == mark) {
      return;
    }
  }

  // We have to take the slow-path of possible inflation and then exit.
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, object, inflate_cause_vm_internal);
  omh.om_ptr()->exit(true, THREAD);
}

// -----------------------------------------------------------------------------
// Class Loader support to workaround deadlocks on the class loader lock objects
// Also used by GC
// complete_exit()/reenter() are used to wait on a nested lock
// i.e. to give up an outer lock completely and then re-enter
// Used when holding nested locks - lock acquisition order: lock1 then lock2
//  1) complete_exit lock1 - saving recursion count
//  2) wait on lock2
//  3) when notified on lock2, unlock lock2
//  4) reenter lock1 with original recursion count
//  5) lock lock2
// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
intx ObjectSynchronizer::complete_exit(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  intptr_t ret_code = omh.om_ptr()->complete_exit(THREAD);
  return ret_code;
}

// NOTE: must use heavy weight monitor to handle complete_exit/reenter()
void ObjectSynchronizer::reenter(Handle obj, intx recursions, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_vm_internal);
  omh.om_ptr()->reenter(recursions, THREAD);
}
// -----------------------------------------------------------------------------
// JNI locks on java objects
// NOTE: must use heavy weight monitor to handle jni monitor enter
void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
  // the current locking is from JNI instead of Java code
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  THREAD->set_current_pending_monitor_is_from_java(false);
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_jni_enter);
  omh.om_ptr()->enter(THREAD);
  THREAD->set_current_pending_monitor_is_from_java(true);
}

// NOTE: must use heavy weight monitor to handle jni monitor exit
void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
  if (UseBiasedLocking) {
    Handle h_obj(THREAD, obj);
    BiasedLocking::revoke(h_obj, THREAD);
    obj = h_obj();
  }
  assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");

  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj, inflate_cause_jni_exit);
  ObjectMonitor* monitor = omh.om_ptr();
  // If this thread has locked the object, exit the monitor. We
  // intentionally do not use CHECK here because we must exit the
  // monitor even if an exception is pending.
  if (monitor->check_owner(THREAD)) {
    monitor->exit(true, THREAD);
  }
}

// -----------------------------------------------------------------------------
// Internal VM locks on java objects
// standard constructor, allows locking failures
ObjectLocker::ObjectLocker(Handle obj, Thread* thread, bool do_lock) {
  _dolock = do_lock;
  _thread = thread;
  _thread->check_for_valid_safepoint_state();
  _obj = obj;

  if (_dolock) {
    ObjectSynchronizer::enter(_obj, &_lock, _thread);
  }
}

ObjectLocker::~ObjectLocker() {
  if (_dolock) {
    ObjectSynchronizer::exit(_obj(), &_lock, _thread);
  }
}


// -----------------------------------------------------------------------------
//  Wait/Notify/NotifyAll
// NOTE: must use heavy weight monitor to handle wait()
int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  ObjectMonitor* monitor = omh.om_ptr();

  DTRACE_MONITOR_WAIT_PROBE(monitor, obj(), THREAD, millis);
  monitor->wait(millis, true, THREAD);

  // This dummy call is in place to get around dtrace bug 6254741. Once
  // that's fixed we can uncomment the following line, remove the call
  // and change this function back into a "void" func.
  // DTRACE_MONITOR_PROBE(waited, monitor, obj(), THREAD);
  int ret_code = dtrace_waited_probe(monitor, obj, THREAD);
  return ret_code;
}

void ObjectSynchronizer::wait_uninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_wait);
  omh.om_ptr()->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notify(THREAD);
}

// NOTE: see comment of notify()
void ObjectSynchronizer::notifyall(Handle obj, TRAPS) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(obj, THREAD);
    assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  markWord mark = obj->mark();
  if (mark.has_locker() && THREAD->is_lock_owned((address)mark.locker())) {
    return;
  }
  ObjectMonitorHandle omh;
  inflate(&omh, THREAD, obj(), inflate_cause_notify);
  omh.om_ptr()->notifyAll(THREAD);
}

// -----------------------------------------------------------------------------
// Hash Code handling
//
// Performance concern:
// OrderAccess::storestore() calls release() which at one time stored 0
// into the global volatile OrderAccess::dummy
// variable. This store was unnecessary for correctness. Many threads
// storing into a common location causes considerable cache migration
// or "sloshing" on large SMP systems. As such, I avoided using
// OrderAccess::storestore(). In some cases OrderAccess::fence() --
// which incurs local latency on the executing processor -- is a better
// choice as it scales on SMP systems.
//
// See http://blogs.oracle.com/dave/entry/biased_locking_in_hotspot for
// a discussion of coherency costs. Note that all our current reference
// platforms provide strong ST-ST order, so the issue is moot on IA32,
// x64, and SPARC.
//
// As a general policy we use "volatile" to control compiler-based reordering
// and explicit fences (barriers) to control for architectural reordering
// performed by the CPU(s) or platform.

struct SharedGlobals {
  char _pad_prefix[OM_CACHE_LINE_SIZE];
  // These are highly shared mostly-read variables.
  // To avoid false-sharing they need to be the sole occupants of a cache line.
  volatile int stw_random;
  volatile int stw_cycle;
  DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(volatile int) * 2);
  // Hot RW variable -- Sequester to avoid false-sharing
  volatile int hc_sequence;
  DEFINE_PAD_MINUS_SIZE(2, OM_CACHE_LINE_SIZE, sizeof(volatile int));
};

static SharedGlobals GVars;
static int _forceMonitorScavenge = 0;  // Scavenge required and pending

static markWord read_stable_mark(oop obj) {
  markWord mark = obj->mark();
  if (!mark.is_being_inflated()) {
    return mark;  // normal fast-path return
  }

  int its = 0;
  for (;;) {
    markWord mark = obj->mark();
    if (!mark.is_being_inflated()) {
      return mark;  // normal fast-path return
    }

    // The object is being inflated by some other thread.
    // The caller of read_stable_mark() must wait for inflation to complete.
    // Avoid live-lock
    // TODO: consider calling SafepointSynchronize::do_call_back() while
    // spinning to see if there's a safepoint pending. If so, immediately
    // yielding or blocking would be appropriate. Avoid spinning while
    // there is a safepoint pending.
    // TODO: add inflation contention performance counters.
    // TODO: restrict the aggregate number of spinners.

    ++its;
    if (its > 10000 || !os::is_MP()) {
      if (its & 1) {
        os::naked_yield();
      } else {
        // Note that the following code attenuates the livelock problem but is not
        // a complete remedy. A more complete solution would require that the inflating
        // thread hold the associated inflation lock. The following code simply restricts
        // the number of spinners to at most one. We'll have N-2 threads blocked
        // on the inflationlock, 1 thread holding the inflation lock and using
        // a yield/park strategy, and 1 thread in the midst of inflation.
        // A more refined approach would be to change the encoding of INFLATING
        // to allow encapsulation of a native thread pointer. Threads waiting for
        // inflation to complete would use CAS to push themselves onto a singly linked
        // list rooted at the markword. Once enqueued, they'd loop, checking a per-thread flag
        // and calling park(). When inflation was complete the thread that accomplished inflation
        // would detach the list and set the markword to inflated with a single CAS and
        // then for each thread on the list, set the flag and unpark() the thread.
        // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
        // wakes at most one thread whereas we need to wake the entire list.
        int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1);
        int YieldThenBlock = 0;
        assert(ix >= 0 && ix < NINFLATIONLOCKS, "invariant");
        assert((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant");
        Thread::muxAcquire(gInflationLocks + ix, "gInflationLock");
        while (obj->mark() == markWord::INFLATING()) {
          // Beware: NakedYield() is advisory and has almost no effect on some platforms
          // so we periodically call self->_ParkEvent->park(1).
          // We use a mixed spin/yield/block mechanism.
          if ((YieldThenBlock++) >= 16) {
            Thread::current()->_ParkEvent->park(1);
          } else {
            os::naked_yield();
          }
        }
        Thread::muxRelease(gInflationLocks + ix);
      }
    } else {
      SpinPause();  // SMP-polite spinning
    }
  }
}

// hashCode() generation :
//
// Possibilities:
// * MD5Digest of {obj,stw_random}
// * CRC32 of {obj,stw_random} or any linear-feedback shift register function.
// * A DES- or AES-style SBox[] mechanism
// * One of the Phi-based schemes, such as:
//   2654435761 = 2^32 * Phi (golden ratio)
//   HashCodeValue = ((uintptr_t(obj) >> 3) * 2654435761) ^ GVars.stw_random ;
// * A variation of Marsaglia's shift-xor RNG scheme.
// * (obj ^ stw_random) is appealing, but can result
//   in undesirable regularity in the hashCode values of adjacent objects
//   (objects allocated back-to-back, in particular). This could potentially
//   result in hashtable collisions and reduced hashtable efficiency.
//   There are simple ways to "diffuse" the middle address bits over the
//   generated hashCode values:

static inline intptr_t get_next_hash(Thread* self, oop obj) {
  intptr_t value = 0;
  if (hashCode == 0) {
    // This form uses global Park-Miller RNG.
    // On MP systems we'll have lots of RW access to a global, so the
    // mechanism induces lots of coherency traffic.
    value = os::random();
  } else if (hashCode == 1) {
    // This variation has the property of being stable (idempotent)
    // between STW operations. This can be useful in some of the 1-0
    // synchronization schemes.
    intptr_t addr_bits = cast_from_oop<intptr_t>(obj) >> 3;
    value = addr_bits ^ (addr_bits >> 5) ^ GVars.stw_random;
  } else if (hashCode == 2) {
    value = 1;  // for sensitivity testing
  } else if (hashCode == 3) {
    value = ++GVars.hc_sequence;
  } else if (hashCode == 4) {
    value = cast_from_oop<intptr_t>(obj);
  } else {
    // Marsaglia's xor-shift scheme with thread-specific state
    // This is probably the best overall implementation -- we'll
    // likely make this the default in future releases.
    unsigned t = self->_hashStateX;
    t ^= (t << 11);
    self->_hashStateX = self->_hashStateY;
    self->_hashStateY = self->_hashStateZ;
    self->_hashStateZ = self->_hashStateW;
    unsigned v = self->_hashStateW;
    v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
    self->_hashStateW = v;
    value = v;
  }

  value &= markWord::hash_mask;
  if (value == 0) value = 0xBAD;
  assert(value != markWord::no_hash, "invariant");
  return value;
}
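// Illustrative note on the default path above: it is Marsaglia's xor-shift
// generator with four words of per-thread state, so each thread produces an
// independent pseudo-random sequence with no coherency traffic. The combined
// state words must not all be zero, e.g. (hypothetical seed values):
//
//   self->_hashStateX = 74687266u;   // any seeding with nonzero state works
//   self->_hashStateY = 842502087u;
//   self->_hashStateZ = 273326509u;
//   self->_hashStateW = 512266776u;
//
// The result is then truncated with markWord::hash_mask so it fits in the
// header's hash field, and 0 is remapped to 0xBAD because 0 doubles as
// markWord::no_hash ("no hash installed yet").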

intptr_t ObjectSynchronizer::FastHashCode(Thread* self, oop obj) {
  if (UseBiasedLocking) {
    // NOTE: many places throughout the JVM do not expect a safepoint
    // to be taken here, in particular most operations on perm gen
    // objects. However, we only ever bias Java instances and all of
    // the call sites of identity_hash that might revoke biases have
    // been checked to make sure they can handle a safepoint. The
    // added check of the bias pattern is to avoid useless calls to
    // thread-local storage.
    if (obj->mark().has_bias_pattern()) {
      // Handle for oop obj in case of STW safepoint
      Handle hobj(self, obj);
      // Relaxing assertion for bug 6320749.
      assert(Universe::verify_in_progress() ||
             !SafepointSynchronize::is_at_safepoint(),
             "biases should not be seen by VM thread here");
      BiasedLocking::revoke(hobj, JavaThread::current());
      obj = hobj();
      assert(!obj->mark().has_bias_pattern(), "biases should be revoked by now");
    }
  }

  // hashCode() is a heap mutator ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         self->is_Java_thread() , "invariant");
  assert(Universe::verify_in_progress() || DumpSharedSpaces ||
         ((JavaThread *)self)->thread_state() != _thread_blocked, "invariant");

  while (true) {
    ObjectMonitor* monitor = NULL;
    markWord temp, test;
    intptr_t hash;
    markWord mark = read_stable_mark(obj);

    // object should remain ineligible for biased locking
    assert(!mark.has_bias_pattern(), "invariant");

    if (mark.is_neutral()) {            // if this is a normal header
      hash = mark.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
                                        // try to install the hash
      test = obj->cas_set_mark(temp, mark);
      if (test == mark) {               // if the hash was installed, return it
        return hash;
      }
      // Failed to install the hash. It could be that another thread
      // installed the hash just before our attempt or inflation has
      // occurred or... so we fall thru to inflate the monitor for
      // stability and then install the hash.
    } else if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      monitor = omh.om_ptr();
      temp = monitor->header();
      // Allow for a lagging install_displaced_markword_in_object() to
      // have marked the ObjectMonitor's header/dmw field.
      assert(temp.is_neutral() || (AsyncDeflateIdleMonitors && temp.is_marked()),
             "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // Fall thru so we only have one place that installs the hash in
      // the ObjectMonitor.
    } else if (self->is_lock_owned((address)mark.locker())) {
      // This is a stack lock owned by the calling thread so fetch the
      // displaced markWord from the BasicLock on the stack.
      temp = mark.displaced_mark_helper();
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      hash = temp.hash();
      if (hash != 0) {                  // if it has a hash, just return it
        return hash;
      }
      // WARNING:
      // The displaced header in the BasicLock on a thread's stack
      // is strictly immutable. It CANNOT be changed in ANY cases.
      // So we have to inflate the stack lock into an ObjectMonitor
      // even if the current thread owns the lock. The BasicLock on
      // a thread's stack can be asynchronously read by other threads
      // during an inflate() call so any change to that stack memory
      // may not propagate to other threads correctly.
    }

    // Inflate the monitor to set the hash.
    ObjectMonitorHandle omh;
    inflate(&omh, self, obj, inflate_cause_hash_code);
    monitor = omh.om_ptr();
    // Load ObjectMonitor's header/dmw field and see if it has a hash.
    mark = monitor->header();
    // Allow for a lagging install_displaced_markword_in_object() to
    // have marked the ObjectMonitor's header/dmw field.
    assert(mark.is_neutral() || (AsyncDeflateIdleMonitors && mark.is_marked()),
           "invariant: header=" INTPTR_FORMAT, mark.value());
    hash = mark.hash();
    if (hash == 0) {                    // if it does not have a hash
      hash = get_next_hash(self, obj);  // get a new hash
      temp = mark.copy_set_hash(hash);  // merge the hash into header
      if (AsyncDeflateIdleMonitors && temp.is_marked()) {
        // A lagging install_displaced_markword_in_object() has marked
        // the ObjectMonitor's header/dmw field. We clear it to avoid
        // any confusion if we are able to set the hash.
        temp.set_unmarked();
      }
      assert(temp.is_neutral(), "invariant: header=" INTPTR_FORMAT, temp.value());
      uintptr_t v = Atomic::cmpxchg((volatile uintptr_t*)monitor->header_addr(), mark.value(), temp.value());
      test = markWord(v);
      if (test != mark) {
        // The attempt to update the ObjectMonitor's header/dmw field
        // did not work. This can happen if another thread managed to
        // merge in the hash just before our cmpxchg(). With async
        // deflation, a lagging install_displaced_markword_in_object()
        // could have just marked or just unmarked the header/dmw field.
        // If we add any new usages of the header/dmw field, this code
        // will need to be updated.
        if (AsyncDeflateIdleMonitors) {
          // Since async deflation gives us two possible reasons for
          // the cmpxchg() to fail, it is easier to simply retry.
          continue;
        }
        hash = test.hash();
        assert(test.is_neutral(), "invariant: header=" INTPTR_FORMAT, test.value());
        assert(hash != 0, "should only have lost the race to a thread that set a non-zero hash");
      }
    }
    // We finally get the hash.
    return hash;
  }
}

// Deprecated -- use FastHashCode() instead.

intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
  return FastHashCode(Thread::current(), obj());
}


bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
                                                   Handle h_obj) {
  if (UseBiasedLocking) {
    BiasedLocking::revoke(h_obj, thread);
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  assert(thread == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markWord mark = read_stable_mark(obj);

    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      return thread->is_lock_owned((address)mark.locker());
    }
    // Contended case, header points to ObjectMonitor (tagged pointer)
    if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      bool ret_code = omh.om_ptr()->is_entered(thread) != 0;
      return ret_code;
    }
    // Unlocked case, header in place
    assert(mark.is_neutral(), "sanity check");
    return false;
  }
}

// Be aware that this method can revoke the bias of the lock object.
// This method queries the ownership of the lock handle specified by 'h_obj'.
// If the current thread owns the lock, it returns owner_self. If no
// thread owns the lock, it returns owner_none. Otherwise, it returns
// owner_other.
ObjectSynchronizer::LockOwnership ObjectSynchronizer::query_lock_ownership
(JavaThread *self, Handle h_obj) {
  // The caller must beware this method can revoke bias, and
  // revocation can result in a safepoint.
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(self->thread_state() != _thread_blocked, "invariant");

  // Possible mark states: neutral, biased, stack-locked, inflated

  if (UseBiasedLocking && h_obj()->mark().has_bias_pattern()) {
    // CASE: biased
    BiasedLocking::revoke(h_obj, self);
    assert(!h_obj->mark().has_bias_pattern(),
           "biases should be revoked by now");
  }

  assert(self == JavaThread::current(), "Can only be called on current thread");
  oop obj = h_obj();

  while (true) {
    markWord mark = read_stable_mark(obj);

    // CASE: stack-locked. Mark points to a BasicLock on the owner's stack.
    if (mark.has_locker()) {
      return self->is_lock_owned((address)mark.locker()) ?
        owner_self : owner_other;
    }

    // CASE: inflated. Mark (tagged pointer) points to an ObjectMonitor.
    // The Object:ObjectMonitor relationship is stable as long as we're
    // not at a safepoint and AsyncDeflateIdleMonitors is false.
    if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      void* owner = monitor->_owner;
      if (owner == NULL) return owner_none;
      return (owner == self ||
              self->is_lock_owned((address)owner)) ? owner_self : owner_other;
    }

    // CASE: neutral
    assert(mark.is_neutral(), "sanity check");
    return owner_none;  // it's unlocked
  }
}

// FIXME: jvmti should call this
JavaThread* ObjectSynchronizer::get_lock_owner(ThreadsList * t_list, Handle h_obj) {
  if (UseBiasedLocking) {
    if (SafepointSynchronize::is_at_safepoint()) {
      BiasedLocking::revoke_at_safepoint(h_obj);
    } else {
      BiasedLocking::revoke(h_obj, JavaThread::current());
    }
    assert(!h_obj->mark().has_bias_pattern(), "biases should be revoked by now");
  }

  oop obj = h_obj();

  while (true) {
    address owner = NULL;
    markWord mark = read_stable_mark(obj);

    // Uncontended case, header points to stack
    if (mark.has_locker()) {
      owner = (address) mark.locker();
    }

    // Contended case, header points to ObjectMonitor (tagged pointer)
    else if (mark.has_monitor()) {
      ObjectMonitorHandle omh;
      if (!omh.save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh.om_ptr();
      assert(monitor != NULL, "monitor should be non-null");
      owner = (address) monitor->owner();
    }

    if (owner != NULL) {
      // owning_thread_from_monitor_owner() may also return NULL here
      return Threads::owning_thread_from_monitor_owner(t_list, owner);
    }

    // Unlocked case, header in place
    // Cannot have assertion since this object may have been
    // locked by another thread when reaching here.
    // assert(mark.is_neutral(), "sanity check");

    return NULL;
  }
}

// Visitors ...

void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
  PaddedObjectMonitor* block = Atomic::load(&g_block_list);
  while (block != NULL) {
    assert(block->object() == CHAINMARKER, "must be a block header");
    for (int i = _BLOCKSIZE - 1; i > 0; i--) {
      ObjectMonitor* mid = (ObjectMonitor *)(block + i);
      ObjectMonitorHandle omh;
      if (!mid->is_free() && omh.set_om_ptr_if_safe(mid)) {
        // The ObjectMonitor* is not free and it has been made safe.
        if (mid->object() == NULL) {
          // Only process with closure if the object is set.
          continue;
        }
        closure->do_monitor(mid);
      }
    }
    // unmarked_next() is not needed with g_block_list (no locking
    // is used with the block linkage _next_om fields).
    block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
  }
}

static bool monitors_used_above_threshold() {
  if (Atomic::load(&LVars.population) == 0) {
    return false;
  }
  if (MonitorUsedDeflationThreshold > 0) {
    int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
    if (HandshakeAfterDeflateIdleMonitors) {
      monitors_used -= Atomic::load(&LVars.wait_count);
    }
    int monitor_usage = (monitors_used * 100LL) / Atomic::load(&LVars.population);
    return monitor_usage > MonitorUsedDeflationThreshold;
  }
  return false;
}
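// Worked example for monitors_used_above_threshold() (numbers hypothetical):
// with population=1000, free_count=200 and wait_count=100 (the latter only
// subtracted when HandshakeAfterDeflateIdleMonitors is true), monitors_used
// is 1000 - 200 - 100 = 700, so monitor_usage is (700 * 100) / 1000 = 70 and
// deflation is signalled once 70 > MonitorUsedDeflationThreshold.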
// Returns true if MonitorBound is set (> 0) and if the specified
// cnt is > MonitorBound. Otherwise returns false.
static bool is_MonitorBound_exceeded(const int cnt) {
  const int mx = MonitorBound;
  return mx > 0 && cnt > mx;
}

bool ObjectSynchronizer::is_async_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    return false;
  }
  if (is_async_deflation_requested()) {
    // Async deflation request.
    return true;
  }
  if (AsyncDeflationInterval > 0 &&
      time_since_last_async_deflation_ms() > AsyncDeflationInterval &&
      monitors_used_above_threshold()) {
    // It's been longer than our specified deflate interval and there
    // are too many monitors in use. We don't deflate more frequently
    // than AsyncDeflationInterval (unless is_async_deflation_requested)
    // in order to not swamp the ServiceThread.
    _last_async_deflation_time_ns = os::javaTimeNanos();
    return true;
  }
  int monitors_used = Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count);
  if (HandshakeAfterDeflateIdleMonitors) {
    monitors_used -= Atomic::load(&LVars.wait_count);
  }
  if (is_MonitorBound_exceeded(monitors_used)) {
    // Not enough ObjectMonitors on the global free list.
    return true;
  }
  return false;
}

bool ObjectSynchronizer::needs_monitor_scavenge() {
  if (Atomic::load(&_forceMonitorScavenge) == 1) {
    log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
    return true;
  }
  return false;
}

bool ObjectSynchronizer::is_safepoint_deflation_needed() {
  if (!AsyncDeflateIdleMonitors) {
    if (monitors_used_above_threshold()) {
      // Too many monitors in use.
      return true;
    }
    return needs_monitor_scavenge();
  }
  if (is_special_deflation_requested()) {
    // For AsyncDeflateIdleMonitors only do a safepoint deflation
    // if there is a special deflation request.
    return true;
  }
  return false;
}

jlong ObjectSynchronizer::time_since_last_async_deflation_ms() {
  return (os::javaTimeNanos() - _last_async_deflation_time_ns) / (NANOUNITS / MILLIUNITS);
}

void ObjectSynchronizer::oops_do(OopClosure* f) {
  // We only scan the global used list here (for moribund threads), and
  // the thread-local monitors in Thread::oops_do().
  global_used_oops_do(f);
}

void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(Atomic::load(&LVars.in_use_list), Atomic::load(&LVars.in_use_count), f);
}

void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  list_oops_do(thread->om_in_use_list, thread->om_in_use_count, f);
}

void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, int count, OopClosure* f) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  // The oops_do() phase does not overlap with monitor deflation
  // so no need to update the ObjectMonitor's ref_count for this
  // ObjectMonitor* use and no need to mark ObjectMonitors for the
  // list traversal.
  for (ObjectMonitor* mid = list; mid != NULL; mid = unmarked_next(mid)) {
    if (mid->object() != NULL) {
      f->do_oop((oop*)mid->object_addr());
    }
  }
}


// -----------------------------------------------------------------------------
// ObjectMonitor Lifecycle
// -----------------------
// Inflation unlinks monitors from LVars.free_list or a per-thread free
// list and associates them with objects. Deflation -- which occurs at
// STW-time or asynchronously -- disassociates idle monitors from objects.
// Such scavenged monitors are returned to the LVars.free_list.
//
// ObjectMonitors reside in type-stable memory (TSM) and are immortal.
//
// Lifecycle:
// --   unassigned and on the LVars.free_list
// --   unassigned and on a per-thread free list
// --   assigned to an object. The object is inflated and the mark refers
//      to the ObjectMonitor.


// Constraining monitor pool growth via MonitorBound ...
//
// If MonitorBound is not set (<= 0), MonitorBound checks are disabled.
//
// When safepoint deflation is being used (!AsyncDeflateIdleMonitors):
// The monitor pool is grow-only. We scavenge at STW safepoint-time, but
// the rate of scavenging is driven primarily by GC. As such, we can find
// an inordinate number of monitors in circulation.
// To avoid that scenario we can artificially induce a STW safepoint
// if the pool appears to be growing past some reasonable bound.
// Generally we favor time in space-time tradeoffs, but as there's no
// natural back-pressure on the # of extant monitors we need to impose some
// type of limit. Beware that if MonitorBound is set to too low a value
// we could just loop. In addition, if MonitorBound is set to a low value
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// When safepoint deflation is being used and MonitorBound is set, the
// boundary applies to
//   (LVars.population - LVars.free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
// then a safepoint deflation is induced. Picking a good MonitorBound value
// is non-trivial.
//
// When async deflation is being used:
// The monitor pool is still grow-only. Async deflation is requested
// by a safepoint's cleanup phase or by the ServiceThread at periodic
// intervals when is_async_deflation_needed() returns true. In
// addition to other policies that are checked, if there are not
// enough ObjectMonitors on the global free list, then
// is_async_deflation_needed() will return true. The ServiceThread
// calls deflate_global_idle_monitors_using_JT() and also calls
// deflate_per_thread_idle_monitors_using_JT() as needed.

static void InduceScavenge(Thread* self, const char * Whence) {
  assert(!AsyncDeflateIdleMonitors, "is not used by async deflation");

  // Induce STW safepoint to trim monitors
  // Ultimately, this results in a call to deflate_idle_monitors() in the near future.
  // More precisely, trigger a cleanup safepoint as the number
  // of active monitors passes the specified threshold.
  // TODO: assert thread state is reasonable

  if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
    VMThread::check_for_forced_cleanup();
  }
}

ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self,
                                            const InflateCause cause) {
  // A large MAXPRIVATE value reduces both list lock contention
  // and list coherency traffic, but also tends to increase the
  // number of ObjectMonitors in circulation as well as the STW
  // scavenge costs. As usual, we lean toward time in space-time
  // tradeoffs.
  const int MAXPRIVATE = 1024;

  stringStream ss;
  for (;;) {
    ObjectMonitor* m;

    // 1: try to allocate from the thread's local om_free_list.
    // Threads will attempt to allocate first from their local list, then
    // from the global list, and only after those attempts fail will the
    // thread attempt to instantiate new monitors. Thread-local free lists
    // improve allocation latency, as well as reducing coherency traffic
    // on the shared global list.
    m = take_from_start_of_om_free_list(self);
    if (m != NULL) {
      guarantee(m->object() == NULL, "invariant");
      m->set_allocation_state(ObjectMonitor::New);
      prepend_to_om_in_use_list(self, m);
      return m;
    }

    // 2: try to allocate from the global LVars.free_list.
    // CONSIDER: use muxTry() instead of muxAcquire().
    // If the muxTry() fails then drop immediately into case 3.
    // If we're using thread-local free lists then try
    // to reprovision the caller's free list.
    if (Atomic::load(&LVars.free_list) != NULL) {
      // Reprovision the thread's om_free_list.
      // Use bulk transfers to reduce the allocation rate and heat
      // on various locks.
      for (int i = self->om_free_provision; --i >= 0;) {
        ObjectMonitor* take = take_from_start_of_global_free_list();
        if (take == NULL) {
          break;  // No more are available.
        }
        guarantee(take->object() == NULL, "invariant");
        if (AsyncDeflateIdleMonitors) {
          // We allowed 3 field values to linger during async deflation.
          // We clear header and restore ref_count here, but we leave
          // owner == DEFLATER_MARKER so the simple C2 ObjectMonitor
          // enter optimization can no longer race with async deflation
          // and reuse.
          take->set_header(markWord::zero());
          if (take->ref_count() < 0) {
            // Add back max_jint to restore the ref_count field to its
            // proper value.
            Atomic::add(&take->_ref_count, max_jint);

#ifdef ASSERT
            jint l_ref_count = take->ref_count();
#endif
            assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
                   l_ref_count, take->ref_count());
          }
        }
        take->Recycle();
        // Since we're taking from the global free-list, take must be Free.
        // om_release() also sets the allocation state to Free because it
        // is called from other code paths.
        assert(take->is_free(), "invariant");
        om_release(self, take, false);
      }
      self->om_free_provision += 1 + (self->om_free_provision / 2);
      if (self->om_free_provision > MAXPRIVATE) self->om_free_provision = MAXPRIVATE;

      if (!AsyncDeflateIdleMonitors &&
          is_MonitorBound_exceeded(Atomic::load(&LVars.population) - Atomic::load(&LVars.free_count))) {
        // Not enough ObjectMonitors on the global free list.
        // We can't safely induce a STW safepoint from om_alloc() as our thread
        // state may not be appropriate for such activities and callers may hold
        // naked oops, so instead we defer the action.
        InduceScavenge(self, "om_alloc");
      }
      continue;
    }

    // 3: allocate a block of new ObjectMonitors.
    // Both the local and global free lists are empty -- resort to malloc().
    // In the current implementation ObjectMonitors are TSM - immortal.
    // Ideally, we'd write "new ObjectMonitor[_BLOCKSIZE]", but we want
    // each ObjectMonitor to start at the beginning of a cache line,
    // so we use align_up().
    // A better solution would be to use C++ placement-new.
    // BEWARE: As it stands currently, we don't run the ctors!
    assert(_BLOCKSIZE > 1, "invariant");
    size_t neededsize = sizeof(PaddedObjectMonitor) * _BLOCKSIZE;
    PaddedObjectMonitor* temp;
    size_t aligned_size = neededsize + (OM_CACHE_LINE_SIZE - 1);
    void* real_malloc_addr = NEW_C_HEAP_ARRAY(char, aligned_size, mtInternal);
    temp = (PaddedObjectMonitor*)align_up(real_malloc_addr, OM_CACHE_LINE_SIZE);
    (void)memset((void*)temp, 0, neededsize);

    // Format the block.
    // Initialize the linked list: each monitor points to its next,
    // forming the singly linked free list, and the very first monitor
    // points to the next block, which forms the block list.
    // The trick of using the 1st element in the block as g_block_list
    // linkage should be reconsidered. A better implementation would
    // look like: class Block { Block* next; int N; ObjectMonitor Body[N]; }

    for (int i = 1; i < _BLOCKSIZE; i++) {
      temp[i]._next_om = (ObjectMonitor*)&temp[i + 1];
      assert(temp[i].is_free(), "invariant");
    }

    // terminate the last monitor as the end of list
    temp[_BLOCKSIZE - 1]._next_om = (ObjectMonitor*)NULL;

    // Element [0] is reserved for global list linkage
    temp[0].set_object(CHAINMARKER);

    // Consider carving out this thread's current request from the
    // block in hand. This avoids some lock traffic and redundant
    // list activity.

    prepend_block_to_lists(temp);
  }
}

// Place "m" on the caller's private per-thread om_free_list.
// In practice there's no need to clamp or limit the number of
// monitors on a thread's om_free_list as the only non-allocation time
// we'll call om_release() is to return a monitor to the free list after
// a CAS attempt failed. This doesn't allow unbounded #s of monitors to
// accumulate on a thread's free list.
//
// Key constraint: all ObjectMonitors on a thread's free list and the global
// free list must have their object field set to null. This prevents the
// scavenger -- deflate_monitor_list() or deflate_monitor_list_using_JT()
// -- from reclaiming them while we are trying to release them.
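//
// The in-use list extraction below follows the
// lock-cur_mid_in_use-and-mid-as-we-go protocol. In outline
// (illustrative only; the real loop is in the function body):
//
//   mid = get_list_head_locked(&self->om_in_use_list);  // head comes back locked
//   while (m != mid) {
//     om_unlock(cur_mid_in_use);   // if we have one
//     cur_mid_in_use = mid;        // stays locked: its next field is stable
//     mid = next; om_lock(mid); next = unmarked_next(mid);
//   }
//   <unlink mid: switch the list head or cur_mid_in_use's next to next>
//
// Holding the lock on cur_mid_in_use keeps its next field stable so a
// parallel async deflater cannot unlink nodes out from under this walk.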

void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
                                    bool from_per_thread_alloc) {
  guarantee(m->header().value() == 0, "invariant");
  guarantee(m->object() == NULL, "invariant");
  stringStream ss;
  guarantee((m->is_busy() | m->_recursions) == 0, "freeing in-use monitor: "
            "%s, recursions=" INTX_FORMAT, m->is_busy_to_string(&ss),
            m->_recursions);
  m->set_allocation_state(ObjectMonitor::Free);
  // _next_om is used for both per-thread in-use and free lists so
  // we have to remove 'm' from the in-use list first (as needed).
  if (from_per_thread_alloc) {
    // Need to remove 'm' from om_in_use_list.
    // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
    // protocol because async deflation can do list deletions in parallel.
    ObjectMonitor* cur_mid_in_use = NULL;
    ObjectMonitor* mid = NULL;
    ObjectMonitor* next = NULL;
    bool extracted = false;

    if ((mid = get_list_head_locked(&self->om_in_use_list)) == NULL) {
      fatal("thread=" INTPTR_FORMAT " in-use list must not be empty.", p2i(self));
    }
    next = unmarked_next(mid);
    while (true) {
      if (m == mid) {
        // We found 'm' on the per-thread in-use list so try to extract it.
        if (cur_mid_in_use == NULL) {
          // mid is the list head and it is locked. Switch the list head
          // to next which unlocks the list head, but leaves mid locked:
          Atomic::store(&self->om_in_use_list, next);
        } else {
          // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
          // next field to next which unlocks cur_mid_in_use, but leaves
          // mid locked:
          set_next(cur_mid_in_use, next);
        }
        extracted = true;
        Atomic::dec(&self->om_in_use_count);
        // Unlock mid, but leave the next value for any lagging list
        // walkers. It will get cleaned up when mid is prepended to
        // the thread's free list:
        om_unlock(mid);
        break;
      }
      if (cur_mid_in_use != NULL) {
        om_unlock(cur_mid_in_use);
      }
      // The next cur_mid_in_use keeps mid's locked state so
      // that it is stable for a possible next field change. It
      // cannot be deflated while it is locked.
      cur_mid_in_use = mid;
      mid = next;
      if (mid == NULL) {
        // Reached end of the list and didn't find m so:
        fatal("must find m=" INTPTR_FORMAT " on om_in_use_list=" INTPTR_FORMAT,
              p2i(m), p2i(self->om_in_use_list));
      }
      // Lock mid so we can possibly extract it:
      om_lock(mid);
      next = unmarked_next(mid);
    }
  }

  prepend_to_om_free_list(self, m);
  guarantee(m->is_free(), "invariant");
}

// Return ObjectMonitors on a moribund thread's free and in-use
// lists to the appropriate global lists. The ObjectMonitors on the
// per-thread in-use list may still be in use by other threads.
//
// We currently call om_flush() from Threads::remove() before the
// thread has been excised from the thread list and is no longer a
// mutator. This means that om_flush() cannot run concurrently with
// a safepoint and interleave with deflate_idle_monitors(). In
// particular, this ensures that the thread's in-use monitors are
// scanned by a GC safepoint, either via Thread::oops_do() (before
// om_flush() is called) or via ObjectSynchronizer::oops_do() (after
// om_flush() is called).
//
// With AsyncDeflateIdleMonitors, deflate_global_idle_monitors_using_JT()
// and deflate_per_thread_idle_monitors_using_JT() (in another thread) can
// run at the same time as om_flush() so we have to follow a careful
// protocol to prevent list corruption.

void ObjectSynchronizer::om_flush(Thread* self) {
  // This function can race with an async deflater thread. Since
  // deflation has to process the per-thread in-use list before
  // prepending the deflated ObjectMonitors to the global free list,
  // we process the per-thread lists in the same order to prevent
  // ordering races.
  int in_use_count = 0;
  ObjectMonitor* in_use_list = NULL;
  ObjectMonitor* in_use_tail = NULL;

  // An async deflation thread checks to see if the target thread
  // is exiting, but if it has made it past that check before we
  // started exiting, then it is racing to get to the in-use list.
  if ((in_use_list = get_list_head_locked(&self->om_in_use_list)) != NULL) {
    // At this point, we have marked the in-use list head so an
    // async deflation thread cannot come in after us. If an async
    // deflation thread is ahead of us, then we'll detect that and
    // wait for it to finish its work.
    //
    // The thread is going away, however the ObjectMonitors on the
    // om_in_use_list may still be in-use by other threads. Link
    // them to in_use_tail, which will be linked into the global
    // in-use list (LVars.in_use_list) below.
    //
    // Account for the in-use list head before the loop since it is
    // already marked (by this thread):
    in_use_tail = in_use_list;
    in_use_count++;
    for (ObjectMonitor* cur_om = unmarked_next(in_use_list); cur_om != NULL;) {
      if (is_locked(cur_om)) {
        // cur_om is locked so there must be an async deflater
        // thread ahead of us so we'll give it a chance to finish.
        while (is_locked(cur_om)) {
          os::naked_short_sleep(1);
        }
        // Refetch the possibly changed next field and try again.
        cur_om = unmarked_next(in_use_tail);
        continue;
      }
      if (cur_om->is_free()) {
        // cur_om was deflated and the allocation state was changed
        // to Free while it was marked. We happened to see it just
        // after it was unmarked (and added to the free list).
        // Refetch the possibly changed next field and try again.
        cur_om = unmarked_next(in_use_tail);
        continue;
      }
      in_use_tail = cur_om;
      in_use_count++;
      cur_om = unmarked_next(cur_om);
    }
    guarantee(in_use_tail != NULL, "invariant");
    int l_om_in_use_count = self->om_in_use_count;
    ADIM_guarantee(l_om_in_use_count == in_use_count, "in-use counts don't "
                   "match: l_om_in_use_count=%d, in_use_count=%d",
                   l_om_in_use_count, in_use_count);
    self->om_in_use_count = 0;
    // Clear the in-use list head (which also unlocks it):
    Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
    om_unlock(in_use_list);
  }

  int free_count = 0;
  ObjectMonitor* free_list = self->om_free_list;
  ObjectMonitor* free_tail = NULL;
  if (free_list != NULL) {
    // The thread is going away. Set 'free_tail' to the last per-thread free
    // monitor which will be linked to LVars.free_list below.
    stringStream ss;
    for (ObjectMonitor* s = free_list; s != NULL; s = unmarked_next(s)) {
      free_count++;
      free_tail = s;
      guarantee(s->object() == NULL, "invariant");
      guarantee(!s->is_busy(), "must be !is_busy: %s", s->is_busy_to_string(&ss));
    }
    guarantee(free_tail != NULL, "invariant");
    int l_om_free_count = self->om_free_count;
    ADIM_guarantee(l_om_free_count == free_count, "free counts don't match: "
                   "l_om_free_count=%d, free_count=%d", l_om_free_count,
                   free_count);
    self->om_free_count = 0;
    Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
  }

  if (free_tail != NULL) {
    prepend_list_to_global_free_list(free_list, free_tail, free_count);
  }

  if (in_use_tail != NULL) {
    prepend_list_to_global_in_use_list(in_use_list, in_use_tail, in_use_count);
  }

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if ((free_count != 0 || in_use_count != 0) &&
             log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("om_flush: jt=" INTPTR_FORMAT ", free_count=%d"
                 ", in_use_count=%d" ", om_free_provision=%d",
                 p2i(self), free_count, in_use_count, self->om_free_provision);
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_monitorClass(obj->klass());
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

// Fast path code shared by multiple functions
void ObjectSynchronizer::inflate_helper(ObjectMonitorHandle* omh_p, oop obj) {
  while (true) {
    markWord mark = obj->mark();
    if (mark.has_monitor()) {
      if (!omh_p->save_om_ptr(obj, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* monitor = omh_p->om_ptr();
      assert(ObjectSynchronizer::verify_objmon_isinpool(monitor), "monitor is invalid");
      markWord dmw = monitor->header();
      assert(dmw.is_neutral(), "sanity check: header=" INTPTR_FORMAT, dmw.value());
      return;
    }
    inflate(omh_p, Thread::current(), obj, inflate_cause_vm_internal);
    return;
  }
}

void ObjectSynchronizer::inflate(ObjectMonitorHandle* omh_p, Thread* self,
                                 oop object, const InflateCause cause) {
  // Inflate mutates the heap ...
  // Relaxing assertion for bug 6320749.
  assert(Universe::verify_in_progress() ||
         !SafepointSynchronize::is_at_safepoint(), "invariant");

  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark();
    assert(!mark.has_bias_pattern(), "invariant");

    // The mark can be in one of the following states:
    // * Inflated     - just return
    // * Stack-locked - coerce it to inflated
    // * INFLATING    - busy wait for conversion to complete
    // * Neutral      - aggressively inflate the object.
    // * BIASED       - Illegal. We should never see this.

    // CASE: inflated
    if (mark.has_monitor()) {
      if (!omh_p->save_om_ptr(object, mark)) {
        // Lost a race with async deflation so try again.
        assert(AsyncDeflateIdleMonitors, "sanity check");
        continue;
      }
      ObjectMonitor* inf = omh_p->om_ptr();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      assert(inf->object() == object, "invariant");
      assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
      return;
    }

    // CASE: inflation in progress - inflating over a stack-lock.
    // Some other thread is converting from stack-locked to inflated.
    // Only that thread can complete inflation -- other threads must wait.
    // The INFLATING value is transient.
    // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
    // We could always eliminate polling by parking the thread on some auxiliary list.
    if (mark == markWord::INFLATING()) {
      read_stable_mark(object);
      continue;
    }

    // CASE: stack-locked
    // Could be stack-locked either by this thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_ attempting
    // to install INFLATING into the mark word. We originally installed INFLATING,
    // allocated the ObjectMonitor, and then finally STed the address of the
    // ObjectMonitor into the mark. This was correct, but artificially lengthened
    // the interval in which INFLATING appeared in the mark, thus increasing
    // the odds of inflation contention.
    //
    // We now use per-thread private ObjectMonitor free lists.
    // These lists are reprovisioned from the global free list outside the
    // critical INFLATING...ST interval. A thread can transfer
    // multiple ObjectMonitors en masse from the global free list to its local free list.
    // This reduces coherency traffic and lock contention on the global free list.
    // Using such local free lists, it doesn't matter if the om_alloc() call appears
    // before or after the CAS(INFLATING) operation.
    // See the comments in om_alloc().

    LogStreamHandle(Trace, monitorinflation) lsh;

    if (mark.has_locker()) {
      ObjectMonitor* m = om_alloc(self, cause);
      // Optimistically prepare the ObjectMonitor - anticipate successful CAS.
      // We do this before the CAS in order to minimize the length of time
      // in which INFLATING appears in the mark.
      m->Recycle();
      m->_Responsible = NULL;
      m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;  // Consider: maintain by type/class

      markWord cmp = object->cas_set_mark(markWord::INFLATING(), mark);
      if (cmp != mark) {
        // om_release() will reset the allocation state from New to Free.
        om_release(self, m, true);
        continue;  // Interference -- just retry
      }

      // We've successfully installed INFLATING (0) into the mark-word.
      // This is the only case where 0 will appear in a mark-word.
      // Only the singular thread that successfully swings the mark-word
      // to 0 can perform (or more precisely, complete) inflation.
      //
      // Why do we CAS a 0 into the mark-word instead of just CASing the
      // mark-word from the stack-locked value directly to the new inflated state?
      // Consider what happens when a thread unlocks a stack-locked object.
      // It attempts to use CAS to swing the displaced header value from the
      // on-stack BasicLock back into the object header.
      // Recall also that the
      // header value (hash code, etc) can reside in (a) the object header, or
      // (b) a displaced header associated with the stack-lock, or (c) a displaced
      // header in an ObjectMonitor. The inflate() routine must copy the header
      // value from the BasicLock on the owner's stack to the ObjectMonitor, all
      // the while preserving the hashCode stability invariants. If the owner
      // decides to release the lock while the value is 0, the unlock will fail
      // and control will eventually pass from slow_exit() to inflate. The owner
      // will then spin, waiting for the 0 value to disappear. Put another way,
      // the 0 causes the owner to stall if the owner happens to try to
      // drop the lock (restoring the header from the BasicLock to the object)
      // while inflation is in progress. This protocol avoids races that would
      // otherwise permit hashCode values to change or "flicker" for an object.
      // Critically, while object->mark is 0 mark.displaced_mark_helper() is stable.
      // 0 serves as a "BUSY" inflate-in-progress indicator.


      // fetch the displaced mark from the owner's stack.
      // The owner can't die or unwind past the lock while our INFLATING
      // object is in the mark. Furthermore the owner can't complete
      // an unlock on the object, either.
      markWord dmw = mark.displaced_mark_helper();
      // Catch if the object's header is not neutral (not locked and
      // not marked is what we care about here).
      ADIM_guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

      // Setup monitor fields to proper values -- prepare the monitor
      m->set_header(dmw);

      // Optimization: if the mark.locker stack address is associated
      // with this thread we could simply set m->_owner = self.
      // Note that a thread can inflate an object
      // that it has stack-locked -- as might happen in wait() -- directly
      // with CAS. That is, we can avoid the xchg-NULL .... ST idiom.
      if (AsyncDeflateIdleMonitors) {
        m->simply_set_owner_from(mark.locker(), NULL, DEFLATER_MARKER);
      } else {
        m->simply_set_owner_from(mark.locker(), NULL);
      }
      m->set_object(object);
      // TODO-FIXME: assert BasicLock->dhw != 0.

      omh_p->set_om_ptr(m);

      // Must preserve store ordering. The monitor state must
      // be stable at the time of publishing the monitor address.
      guarantee(object->mark() == markWord::INFLATING(), "invariant");
      object->release_set_mark(markWord::encode(m));

      // Once the ObjectMonitor is configured and the object is associated
      // with the ObjectMonitor, it is safe to allow async deflation:
      assert(m->is_new(), "freshly allocated monitor must be new");
      m->set_allocation_state(ObjectMonitor::Old);

      // Hopefully the performance counters are allocated on distinct cache lines
      // to avoid false sharing on MP systems ...
      OM_PERFDATA_OP(Inflations, inc());
      if (log_is_enabled(Trace, monitorinflation)) {
        ResourceMark rm(self);
        lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
                     INTPTR_FORMAT ", type='%s'", p2i(object),
                     object->mark().value(), object->klass()->external_name());
      }
      if (event.should_commit()) {
        post_monitor_inflate_event(&event, object, cause);
      }
      ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
      return;
    }

    // CASE: neutral
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from NULL to self.
    // An inflateTry() method that we could call from enter() would be useful.

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    ADIM_guarantee(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = om_alloc(self, cause);
    // prepare m for installation - set monitor to initial state
    m->Recycle();
    m->set_header(mark);
    // If we leave _owner == DEFLATER_MARKER here, then the simple C2
    // ObjectMonitor enter optimization can no longer race with async
    // deflation and reuse.
    m->set_object(object);
    m->_Responsible = NULL;
    m->_SpinDuration = ObjectMonitor::Knob_SpinLimit;  // consider: keep metastats by type/class

    omh_p->set_om_ptr(m);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      m->set_header(markWord::zero());
      m->set_object(NULL);
      m->Recycle();
      omh_p->set_om_ptr(NULL);
      // om_release() will reset the allocation state from New to Free.
      om_release(self, m, true);
      m = NULL;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and the object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    assert(m->is_new(), "freshly allocated monitor must be new");
    m->set_allocation_state(ObjectMonitor::Old);

    // Hopefully the performance counters are allocated on distinct
    // cache lines to avoid false sharing on MP systems ...
    OM_PERFDATA_OP(Inflations, inc());
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm(self);
      lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
                   INTPTR_FORMAT ", type='%s'", p2i(object),
                   object->mark().value(), object->klass()->external_name());
    }
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    ADIM_guarantee(!m->is_free(), "inflated monitor to be returned cannot be free");
    return;
  }
}


// We maintain a list of in-use monitors for each thread.
//
// For safepoint based deflation:
// deflate_thread_local_monitors() scans a single thread's in-use list, while
// deflate_idle_monitors() scans only a global list of in-use monitors which
// is populated only as a thread dies (see om_flush()).
//
// These operations are called at all safepoints, immediately after mutators
// are stopped, but before any objects have moved. Collectively they traverse
// the population of in-use monitors, deflating where possible. The scavenged
// monitors are returned to the global monitor free list.
//
// Beware that we scavenge at *every* stop-the-world point. Having a large
// number of monitors in-use could negatively impact performance. We also want
// to minimize the total # of monitors in circulation, as they incur a small
// footprint penalty.
//
// Perversely, the heap size -- and thus the STW safepoint rate --
// typically drives the scavenge rate. Large heaps can mean infrequent GC,
// which in turn can mean large(r) numbers of ObjectMonitors in circulation.
// This is an unfortunate aspect of this design.
//
// For async deflation:
// If a special deflation request is made, then the safepoint based
// deflation mechanism is used. Otherwise, an async deflation request
// is registered with the ServiceThread and it is notified.

void ObjectSynchronizer::do_safepoint_work(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  // The per-thread in-use lists are handled in
  // ParallelSPCleanupThreadClosure::do_thread().

  if (!AsyncDeflateIdleMonitors || is_special_deflation_requested()) {
    // Use the older mechanism for the global in-use list or if a
    // special deflation has been requested before the safepoint.
    ObjectSynchronizer::deflate_idle_monitors(counters);
    return;
  }

  log_debug(monitorinflation)("requesting async deflation of idle monitors.");
  // Request deflation of idle monitors by the ServiceThread:
  set_is_async_deflation_requested(true);
  MonitorLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
  ml.notify_all();

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level and not at a safepoint.
    // For safepoint based deflation, audit_and_print_stats() is called
    // in ObjectSynchronizer::finish_deflate_idle_monitors() at the
    // Debug level at a safepoint.
    ObjectSynchronizer::audit_and_print_stats(false /* on_exit */);
  }
}

// Deflate a single monitor if not in-use.
// Return true if deflated, false if in-use.
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
                                         ObjectMonitor** free_head_p,
                                         ObjectMonitor** free_tail_p) {
  bool deflated;
  // Normal case ... The monitor is associated with obj.
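  // In outline, safepoint deflation of one monitor is (a sketch of the
  // code below, not a separate path): (1) verify that obj's mark still
  // encodes mid and that mid's header holds a neutral displaced mark
  // word; (2) bail out if mid is busy or its ObjectMonitor* is in use
  // (ref_count != 0); (3) otherwise restore the displaced mark word
  // into obj's header, clear() mid, and append mid to the caller's
  // working free list.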
  const markWord mark = obj->mark();
  guarantee(mark == markWord::encode(mid), "should match: mark="
            INTPTR_FORMAT ", encoded mid=" INTPTR_FORMAT, mark.value(),
            markWord::encode(mid).value());
  // Make sure that mark.monitor() and markWord::encode() agree:
  guarantee(mark.monitor() == mid, "should match: monitor()=" INTPTR_FORMAT
            ", mid=" INTPTR_FORMAT, p2i(mark.monitor()), p2i(mid));
  const markWord dmw = mid->header();
  guarantee(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());

  if (mid->is_busy() || mid->ref_count() != 0) {
    // Easy checks are first - the ObjectMonitor is busy or the ObjectMonitor*
    // is in use so no deflation.
    deflated = false;
  } else {
    // Deflate the monitor if it is no longer being used.
    // It's idle - scavenge and return to the global free list -
    // plain old deflation ...
    if (log_is_enabled(Trace, monitorinflation)) {
      ResourceMark rm;
      log_trace(monitorinflation)("deflate_monitor: "
                                  "object=" INTPTR_FORMAT ", mark="
                                  INTPTR_FORMAT ", type='%s'", p2i(obj),
                                  mark.value(), obj->klass()->external_name());
    }

    // Restore the header back to obj
    obj->release_set_mark(dmw);
    if (AsyncDeflateIdleMonitors) {
      // clear() expects the owner field to be NULL and we won't race
      // with the simple C2 ObjectMonitor enter optimization since
      // we're at a safepoint. DEFLATER_MARKER is the only non-NULL
      // value we should see here.
      mid->try_set_owner_from(NULL, DEFLATER_MARKER);
    }
    mid->clear();

    assert(mid->object() == NULL, "invariant: object=" INTPTR_FORMAT,
           p2i(mid->object()));
    assert(mid->is_free(), "invariant");

    // Move the deflated ObjectMonitor to the working free list
    // defined by free_head_p and free_tail_p. No races on this list
    // so no need for load_acquire() or store_release().
    if (*free_head_p == NULL) *free_head_p = mid;
    if (*free_tail_p != NULL) {
      // We append to the list so the caller can use mid->_next_om
      // to fix the linkages in its context.
      ObjectMonitor* prevtail = *free_tail_p;
      // Should have been cleaned up by the caller:
      // Note: Should not have to lock prevtail here since we're at a
      // safepoint and ObjectMonitors on the local free list should
      // not be accessed in parallel.
      assert(prevtail->_next_om == NULL, "must be NULL: _next_om="
             INTPTR_FORMAT, p2i(prevtail->_next_om));
      set_next(prevtail, mid);
    }
    *free_tail_p = mid;
    // At this point, mid->_next_om still refers to its current
    // value and another ObjectMonitor's _next_om field still
    // refers to this ObjectMonitor. Those linkages have to be
    // cleaned up by the caller who has the complete context.
    deflated = true;
  }
  return deflated;
}

// Deflate the specified ObjectMonitor if not in-use using a JavaThread.
// Returns true if it was deflated and false otherwise.
//
// The async deflation protocol sets owner to DEFLATER_MARKER and
// makes ref_count negative as signals to contending threads that
// an async deflation is in progress. There are a number of checks
// as part of the protocol to make sure that the calling thread has
// not lost the race to a contending thread or to a thread that just
// wants to use the ObjectMonitor*.
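//
// In outline, the three parts of the dance are (a sketch of the code
// below, not a separate code path):
//
//   1) try_set_owner_from(DEFLATER_MARKER, NULL)        // claim the monitor
//   2) Atomic::cmpxchg(&_ref_count, (jint)0, -max_jint) // flip ref_count negative
//   3) owner_is_DEFLATER_MARKER()                       // recheck we kept it
//
// If any step fails, the protocol restores what it changed and
// returns false.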
//
// The ObjectMonitor has been successfully async deflated when:
//   (owner == DEFLATER_MARKER && ref_count < 0)
// Contending threads or ObjectMonitor* using threads that see those
// values know to retry their operation.
//
bool ObjectSynchronizer::deflate_monitor_using_JT(ObjectMonitor* mid,
                                                  ObjectMonitor** free_head_p,
                                                  ObjectMonitor** free_tail_p) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  // A newly allocated ObjectMonitor should not be seen here so we
  // avoid an endless inflate/deflate cycle.
  assert(mid->is_old(), "must be old: allocation_state=%d",
         (int) mid->allocation_state());

  if (mid->is_busy() || mid->ref_count() != 0) {
    // Easy checks are first - the ObjectMonitor is busy or the ObjectMonitor*
    // is in use so no deflation.
    return false;
  }

  if (mid->try_set_owner_from(DEFLATER_MARKER, NULL) == NULL) {
    // ObjectMonitor is not owned by another thread. Our setting
    // owner to DEFLATER_MARKER forces any contending thread through
    // the slow path. This is just the first part of the async
    // deflation dance.

    if (mid->_contentions != 0 || mid->_waiters != 0) {
      // Another thread has raced to enter the ObjectMonitor after
      // mid->is_busy() above or has already entered and waited on
      // it which makes it busy so no deflation. Restore owner to
      // NULL if it is still DEFLATER_MARKER.
      mid->try_set_owner_from(NULL, DEFLATER_MARKER);
      return false;
    }

    if (Atomic::cmpxchg(&mid->_ref_count, (jint)0, -max_jint) == 0) {
      // Make ref_count negative to force any contending threads or
      // ObjectMonitor* using threads to retry. This is the second
      // part of the async deflation dance.

      if (mid->owner_is_DEFLATER_MARKER()) {
        // If owner is still DEFLATER_MARKER, then we have successfully
        // signaled any contending threads to retry. If it is not, then we
        // have lost the race to an entering thread and the ObjectMonitor
        // is now busy. This is the third and final part of the async
        // deflation dance.
        // Note: This owner check solves the ABA problem with ref_count
        // where another thread acquired the ObjectMonitor, finished
        // using it and restored the ref_count to zero.

        // Sanity checks for the races:
        guarantee(mid->_contentions == 0, "must be 0: contentions=%d",
                  mid->_contentions);
        guarantee(mid->_waiters == 0, "must be 0: waiters=%d", mid->_waiters);
        guarantee(mid->_cxq == NULL, "must be no contending threads: cxq="
                  INTPTR_FORMAT, p2i(mid->_cxq));
        guarantee(mid->_EntryList == NULL,
                  "must be no entering threads: EntryList=" INTPTR_FORMAT,
                  p2i(mid->_EntryList));

        const oop obj = (oop) mid->object();
        if (log_is_enabled(Trace, monitorinflation)) {
          ResourceMark rm;
          log_trace(monitorinflation)("deflate_monitor_using_JT: "
                                      "object=" INTPTR_FORMAT ", mark="
                                      INTPTR_FORMAT ", type='%s'",
                                      p2i(obj), obj->mark().value(),
                                      obj->klass()->external_name());
        }

        // Install the old mark word if nobody else has already done it.
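        // ("Nobody else" because a racing thread that noticed the
        // deflation in progress may also try to restore the header;
        // presumably install_displaced_markword_in_object() resolves
        // that race with a CAS, so whichever side wins, obj ends up
        // with the displaced mark word installed.)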
        mid->install_displaced_markword_in_object(obj);
        mid->clear_using_JT();

        assert(mid->object() == NULL, "must be NULL: object=" INTPTR_FORMAT,
               p2i(mid->object()));
        assert(mid->is_free(), "must be free: allocation_state=%d",
               (int) mid->allocation_state());

        // Move the deflated ObjectMonitor to the working free list
        // defined by free_head_p and free_tail_p. No races on this list
        // so no need for load_acquire() or store_release().
        if (*free_head_p == NULL) {
          // First one on the list.
          *free_head_p = mid;
        }
        if (*free_tail_p != NULL) {
          // We append to the list so the caller can use mid->_next_om
          // to fix the linkages in its context.
          ObjectMonitor* prevtail = *free_tail_p;
          // Should have been cleaned up by the caller:
          om_lock(prevtail);
          assert(unmarked_next(prevtail) == NULL, "must be NULL: _next_om="
                 INTPTR_FORMAT, p2i(unmarked_next(prevtail)));
          set_next(prevtail, mid);  // prevtail now points to mid (and is unlocked)
        }
        *free_tail_p = mid;

        // At this point, mid->_next_om still refers to its current
        // value and another ObjectMonitor's _next_om field still
        // refers to this ObjectMonitor. Those linkages have to be
        // cleaned up by the caller who has the complete context.

        // We leave owner == DEFLATER_MARKER and ref_count < 0
        // to force any racing threads to retry.
        return true;  // Success, ObjectMonitor has been deflated.
      }

      // The owner was changed from DEFLATER_MARKER so we lost the
      // race since the ObjectMonitor is now busy.

      // Add back max_jint to restore the ref_count field to its
      // proper value (which may not be what we saw above):
      Atomic::add(&mid->_ref_count, max_jint);

#ifdef ASSERT
      jint l_ref_count = mid->ref_count();
#endif
      assert(l_ref_count >= 0, "must not be negative: l_ref_count=%d, ref_count=%d",
             l_ref_count, mid->ref_count());
      return false;
    }

    // The ref_count was no longer 0 so we lost the race since the
    // ObjectMonitor is now busy or the ObjectMonitor* is now in use.
    // Restore owner to NULL if it is still DEFLATER_MARKER:
    mid->try_set_owner_from(NULL, DEFLATER_MARKER);
  }

  // The owner field is no longer NULL so we lost the race since the
  // ObjectMonitor is now busy.
  return false;
}

// Walk a given monitor list, and deflate idle monitors.
// The given list could be a per-thread list or a global list.
//
// In the case of parallel processing of thread local monitor lists,
// work is done by Threads::parallel_threads_do() which ensures that
// each Java thread is processed by exactly one worker thread, and
// thus avoids conflicts that would arise if worker threads
// processed the same monitor lists concurrently.
//
// See also ParallelSPCleanupTask and
// SafepointSynchronize::do_cleanup_tasks() in safepoint.cpp and
// Threads::parallel_java_threads_do() in thread.cpp.
int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** list_p,
                                             int* count_p,
                                             ObjectMonitor** free_head_p,
                                             ObjectMonitor** free_tail_p) {
  ObjectMonitor* cur_mid_in_use = NULL;
  ObjectMonitor* mid = NULL;
  ObjectMonitor* next = NULL;
  int deflated_count = 0;

  // We use the simpler lock-mid-as-we-go protocol since there are no
  // parallel list deletions while we are at a safepoint.
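  // In outline, the walk below is (illustrative only):
  //
  //   mid = get_list_head_locked(list_p);   // head comes back locked
  //   while (mid != NULL) {
  //     if (deflate_monitor(mid, ...)) {
  //       <unlink mid; set_next(mid, NULL) also unlocks it>
  //     } else {
  //       om_unlock(mid); cur_mid_in_use = mid;
  //     }
  //     mid = next; om_lock(mid); next = unmarked_next(mid);
  //   }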
  if ((mid = get_list_head_locked(list_p)) == NULL) {
    return 0;  // The list is empty so nothing to deflate.
  }
  next = unmarked_next(mid);

  while (true) {
    oop obj = (oop) mid->object();
    if (obj != NULL && deflate_monitor(mid, obj, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (cur_mid_in_use == NULL) {
        // mid is the list head and it is locked. Switch the list head
        // to next which unlocks the list head, but leaves mid locked:
        Atomic::store(list_p, next);
      } else {
        // mid is locked. Switch cur_mid_in_use's next field to next
        // which is safe because we have no parallel list deletions,
        // but we leave mid locked:
        set_next(cur_mid_in_use, next);
      }
      // At this point mid is disconnected from the in-use list so
      // its lock no longer has any effects on the in-use list.
      deflated_count++;
      Atomic::dec(count_p);
      // mid is current tail in the free_head_p list so NULL terminate it
      // (which also unlocks it):
      set_next(mid, NULL);
    } else {
      om_unlock(mid);
      cur_mid_in_use = mid;
    }
    // All the list management is done so move on to the next one:
    mid = next;
    if (mid == NULL) {
      break;  // Reached end of the list so nothing more to deflate.
    }
    // Lock mid so we can possibly deflate it:
    om_lock(mid);
    next = unmarked_next(mid);
  }
  return deflated_count;
}

// Walk a given ObjectMonitor list and deflate idle ObjectMonitors using
// a JavaThread. Returns the number of deflated ObjectMonitors. The given
// list could be a per-thread in-use list or the global in-use list.
// If a safepoint has started, then we save state via saved_mid_in_use_p
// and return to the caller to honor the safepoint.
//
int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
                                                      int* count_p,
                                                      ObjectMonitor** free_head_p,
                                                      ObjectMonitor** free_tail_p,
                                                      ObjectMonitor** saved_mid_in_use_p) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  JavaThread* self = JavaThread::current();

  ObjectMonitor* cur_mid_in_use = NULL;
  ObjectMonitor* mid = NULL;
  ObjectMonitor* next = NULL;
  ObjectMonitor* next_next = NULL;
  int deflated_count = 0;

  // We use the more complicated lock-cur_mid_in_use-and-mid-as-we-go
  // protocol because om_release() can do list deletions in parallel.
  // We also lock-next-next-as-we-go to prevent an om_flush() that is
  // behind this thread from passing us.
  if (*saved_mid_in_use_p == NULL) {
    // No saved state so start at the beginning.
    // Lock the list head so we can possibly deflate it:
    if ((mid = get_list_head_locked(list_p)) == NULL) {
      return 0;  // The list is empty so nothing to deflate.
    }
    next = unmarked_next(mid);
  } else {
    // We're restarting after a safepoint so restore the necessary state
    // before we resume.
    cur_mid_in_use = *saved_mid_in_use_p;
    // Lock cur_mid_in_use so we can possibly update its
    // next field to extract a deflated ObjectMonitor.
    om_lock(cur_mid_in_use);
    mid = unmarked_next(cur_mid_in_use);
    if (mid == NULL) {
      om_unlock(cur_mid_in_use);
      *saved_mid_in_use_p = NULL;
      return 0;  // The remainder is empty so nothing more to deflate.
    }
    // Lock mid so we can possibly deflate it:
    om_lock(mid);
    next = unmarked_next(mid);
  }

  while (true) {
    // The current mid's next field is marked at this point. If we have
    // a cur_mid_in_use, then its next field is also marked at this point.

    if (next != NULL) {
      // We lock next so that an om_flush() thread that is behind us
      // cannot pass us when we unlock the current mid.
      om_lock(next);
      next_next = unmarked_next(next);
    }

    // Only try to deflate if there is an associated Java object and if
    // mid is old (is not newly allocated and is not newly freed).
    if (mid->object() != NULL && mid->is_old() &&
        deflate_monitor_using_JT(mid, free_head_p, free_tail_p)) {
      // Deflation succeeded and already updated free_head_p and
      // free_tail_p as needed. Finish the move to the local free list
      // by unlinking mid from the global or per-thread in-use list.
      if (cur_mid_in_use == NULL) {
        // mid is the list head and it is locked. Switch the list head
        // to next which is also locked (if not NULL) and also leave
        // mid locked:
        Atomic::store(list_p, next);
      } else {
        ObjectMonitor* locked_next = mark_om_ptr(next);
        // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
        // next field to locked_next and also leave mid locked:
        set_next(cur_mid_in_use, locked_next);
      }
      // At this point mid is disconnected from the in-use list so
      // its lock no longer has any effect on the in-use list.
      deflated_count++;
      Atomic::dec(count_p);
      // mid is current tail in the free_head_p list so NULL terminate it
      // (which also unlocks it):
      set_next(mid, NULL);

      // All the list management is done so move on to the next one:
      mid = next;  // mid keeps non-NULL next's locked next field
      next = next_next;
    } else {
      // mid is considered in-use if it does not have an associated
      // Java object or mid is not old or deflation did not succeed.
      // A mid->is_new() node can be seen here when it is freshly
      // returned by om_alloc() (and skips the deflation code path).
      // A mid->is_old() node can be seen here when deflation failed.
      // A mid->is_free() node can be seen here when a fresh node from
      // om_alloc() is released by om_release() due to losing the race
      // in inflate().

      // All the list management is done so move on to the next one:
      if (cur_mid_in_use != NULL) {
        om_unlock(cur_mid_in_use);
      }
      // The next cur_mid_in_use keeps mid's lock state so
      // that it is stable for a possible next field change. It
      // cannot be modified by om_release() while it is locked.
      cur_mid_in_use = mid;
      mid = next;  // mid keeps non-NULL next's locked state
      next = next_next;

      if (SafepointMechanism::should_block(self) &&
          cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
        // If a safepoint has started and cur_mid_in_use is not the list
        // head and is old, then it is safe to use as saved state. Return
        // to the caller before blocking.
        *saved_mid_in_use_p = cur_mid_in_use;
        om_unlock(cur_mid_in_use);
        if (mid != NULL) {
          om_unlock(mid);
        }
        return deflated_count;
      }
    }
    if (mid == NULL) {
      if (cur_mid_in_use != NULL) {
        om_unlock(cur_mid_in_use);
      }
      break;  // Reached end of the list so nothing more to deflate.
    }

    // The current mid's next field is locked at this point. If we have
    // a cur_mid_in_use, then it is also locked at this point.
  }
  // We finished the list without a safepoint starting so there's
  // no need to save state.
  *saved_mid_in_use_p = NULL;
  return deflated_count;
}

void ObjectSynchronizer::prepare_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  counters->n_in_use = 0;              // currently associated with objects
  counters->n_in_circulation = 0;      // extant
  counters->n_scavenged = 0;           // reclaimed (global and per-thread)
  counters->per_thread_scavenged = 0;  // per-thread scavenge total
  counters->per_thread_times = 0.0;    // per-thread scavenge times
  OrderAccess::storestore();           // flush inits for worker threads
}

void ObjectSynchronizer::deflate_idle_monitors(DeflateMonitorCounters* counters) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  if (AsyncDeflateIdleMonitors) {
    // Nothing to do when global idle ObjectMonitors are deflated using
    // a JavaThread unless a special deflation has been requested.
    if (!is_special_deflation_requested()) {
      return;
    }
  }

  bool deflated = false;

  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged monitors
  ObjectMonitor* free_tail_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  // Note: the thread-local monitor lists get deflated in
  // a separate pass. See deflate_thread_local_monitors().

  // For moribund threads, scan LVars.in_use_list
  int deflated_count = 0;
  if (Atomic::load(&LVars.in_use_list) != NULL) {
    // Update n_in_circulation before LVars.in_use_count is updated by deflation.
    Atomic::add(&counters->n_in_circulation, Atomic::load(&LVars.in_use_count));

    deflated_count = deflate_monitor_list(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p);
    Atomic::add(&counters->n_in_use, Atomic::load(&LVars.in_use_count));
  }

  if (free_head_p != NULL) {
    // Move the deflated ObjectMonitors back to the global free list.
    // No races on the working free list so no need for load_acquire().
    guarantee(free_tail_p != NULL && deflated_count > 0, "invariant");
    assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om="
           INTPTR_FORMAT, p2i(free_tail_p->_next_om));
    prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count);
    Atomic::add(&counters->n_scavenged, deflated_count);
  }
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    ls->print_cr("deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
  }
}

class HandshakeForDeflation : public HandshakeClosure {
 public:
  HandshakeForDeflation() : HandshakeClosure("HandshakeForDeflation") {}

  void do_thread(Thread* thread) {
    log_trace(monitorinflation)("HandshakeForDeflation::do_thread: thread="
                                INTPTR_FORMAT, p2i(thread));
  }
};

void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");

  // Deflate any global idle monitors.
  deflate_global_idle_monitors_using_JT();

  int count = 0;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* jt = jtiwh.next(); ) {
    if (jt->om_in_use_count > 0 && !jt->is_exiting()) {
      // This JavaThread is using ObjectMonitors so deflate any that
      // are idle unless this JavaThread is exiting; do not race with
      // ObjectSynchronizer::om_flush().
      deflate_per_thread_idle_monitors_using_JT(jt);
      count++;
    }
  }
  if (count > 0) {
    log_debug(monitorinflation)("did async deflation of idle monitors for %d thread(s).", count);
  }

  log_info(monitorinflation)("async global_population=%d, global_in_use_count=%d, "
                             "global_free_count=%d, global_wait_count=%d",
                             Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count),
                             Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count));

  // The ServiceThread's async deflation request has been processed.
  set_is_async_deflation_requested(false);

  if (HandshakeAfterDeflateIdleMonitors && Atomic::load(&LVars.wait_count) > 0) {
    // There are deflated ObjectMonitors waiting for a handshake
    // (or a safepoint) for safety.

    ObjectMonitor* list = Atomic::load(&LVars.wait_list);
    ADIM_guarantee(list != NULL, "LVars.wait_list must not be NULL");
    int count = Atomic::load(&LVars.wait_count);
    Atomic::store(&LVars.wait_count, 0);
    Atomic::store(&LVars.wait_list, (ObjectMonitor*)NULL);

    // Find the tail for prepend_list_to_common(). No need to mark
    // ObjectMonitors for this list walk since only the deflater
    // thread manages the wait list.
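    // (The Handshake::execute() below is what makes recycling these
    // ObjectMonitors safe: once every JavaThread has executed the
    // handshake -- or the whole system has passed a safepoint when
    // handshakes are not supported -- no thread can still be acting
    // on a stale ObjectMonitor* it picked up before deflation, so
    // the wait list can be prepended to the global free list.)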
    int l_count = 0;
    ObjectMonitor* tail = NULL;
    for (ObjectMonitor* n = list; n != NULL; n = unmarked_next(n)) {
      tail = n;
      l_count++;
    }
    ADIM_guarantee(count == l_count, "count=%d != l_count=%d", count, l_count);

    // Will execute a safepoint if !ThreadLocalHandshakes:
    HandshakeForDeflation hfd_hc;
    Handshake::execute(&hfd_hc);

    prepend_list_to_common(list, tail, count, &LVars.free_list, &LVars.free_count);

    log_info(monitorinflation)("moved %d idle monitors from global waiting list to global free list", count);
  }
}

// Deflate global idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_global_idle_monitors_using_JT() {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");
  JavaThread* self = JavaThread::current();

  deflate_common_idle_monitors_using_JT(true /* is_global */, self);
}

// Deflate the specified JavaThread's idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_per_thread_idle_monitors_using_JT(JavaThread* target) {
  assert(AsyncDeflateIdleMonitors, "sanity check");
  assert(Thread::current()->is_Java_thread(), "precondition");

  deflate_common_idle_monitors_using_JT(false /* !is_global */, target);
}

// Deflate global or per-thread idle ObjectMonitors using a JavaThread.
//
void ObjectSynchronizer::deflate_common_idle_monitors_using_JT(bool is_global, JavaThread* target) {
  JavaThread* self = JavaThread::current();

  int deflated_count = 0;
  ObjectMonitor* free_head_p = NULL;  // Local SLL of scavenged ObjectMonitors
  ObjectMonitor* free_tail_p = NULL;
  ObjectMonitor* saved_mid_in_use_p = NULL;
  elapsedTimer timer;

  if (log_is_enabled(Info, monitorinflation)) {
    timer.start();
  }

  if (is_global) {
    OM_PERFDATA_OP(MonExtant, set_value(Atomic::load(&LVars.in_use_count)));
  } else {
    OM_PERFDATA_OP(MonExtant, inc(target->om_in_use_count));
  }

  do {
    int local_deflated_count;
    if (is_global) {
      local_deflated_count = deflate_monitor_list_using_JT(&LVars.in_use_list, &LVars.in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
    } else {
      local_deflated_count = deflate_monitor_list_using_JT(&target->om_in_use_list, &target->om_in_use_count, &free_head_p, &free_tail_p, &saved_mid_in_use_p);
    }
    deflated_count += local_deflated_count;

    if (free_head_p != NULL) {
      // Move the deflated ObjectMonitors to the global free list.
      // No races on the working list so no need for load_acquire().
      guarantee(free_tail_p != NULL && local_deflated_count > 0, "free_tail_p=" INTPTR_FORMAT ", local_deflated_count=%d", p2i(free_tail_p), local_deflated_count);
      // Note: The target thread can be doing an om_alloc() that
      // is trying to prepend an ObjectMonitor on its in-use list
      // at the same time that we have deflated the current in-use
      // list head and put it on the local free list. prepend_to_common()
      // will detect the race and retry which avoids list corruption,
      // but the next field in free_tail_p can flicker to marked
      // and then unmarked while prepend_to_common() is sorting it
      // all out.
      assert(unmarked_next(free_tail_p) == NULL, "must be NULL: _next_om="
             INTPTR_FORMAT, p2i(unmarked_next(free_tail_p)));

      if (HandshakeAfterDeflateIdleMonitors) {
        prepend_list_to_global_wait_list(free_head_p, free_tail_p, local_deflated_count);
      } else {
        prepend_list_to_global_free_list(free_head_p, free_tail_p, local_deflated_count);
      }

      OM_PERFDATA_OP(Deflations, inc(local_deflated_count));
    }

    if (saved_mid_in_use_p != NULL) {
      // deflate_monitor_list_using_JT() detected a safepoint starting.
      timer.stop();
      {
        if (is_global) {
          log_debug(monitorinflation)("pausing deflation of global idle monitors for a safepoint.");
        } else {
          log_debug(monitorinflation)("jt=" INTPTR_FORMAT ": pausing deflation of per-thread idle monitors for a safepoint.", p2i(target));
        }
        assert(SafepointMechanism::should_block(self), "sanity check");
        ThreadBlockInVM blocker(self);
      }
      // Prepare for another loop after the safepoint.
      free_head_p = NULL;
      free_tail_p = NULL;
      if (log_is_enabled(Info, monitorinflation)) {
        timer.start();
      }
    }
  } while (saved_mid_in_use_p != NULL);
  timer.stop();

  LogStreamHandle(Debug, monitorinflation) lsh_debug;
  LogStreamHandle(Info, monitorinflation) lsh_info;
  LogStream* ls = NULL;
  if (log_is_enabled(Debug, monitorinflation)) {
    ls = &lsh_debug;
  } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) {
    ls = &lsh_info;
  }
  if (ls != NULL) {
    if (is_global) {
      ls->print_cr("async-deflating global idle monitors, %3.7f secs, %d monitors", timer.seconds(), deflated_count);
    } else {
      ls->print_cr("jt=" INTPTR_FORMAT ": async-deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(target), timer.seconds(), deflated_count);
    }
  }
}

void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* counters) {
  // Report the cumulative time for deflating each thread's idle
  // monitors. Note: if the work is split among more than one
  // worker thread, then the reported time will likely be more
  // than a beginning to end measurement of the phase.
  log_info(safepoint, cleanup)("deflating per-thread idle monitors, %3.7f secs, monitors=%d", counters->per_thread_times, counters->per_thread_scavenged);

  bool needs_special_deflation = is_special_deflation_requested();
  if (AsyncDeflateIdleMonitors && !needs_special_deflation) {
    // Nothing to do when idle ObjectMonitors are deflated using
    // a JavaThread unless a special deflation has been requested.
    return;
  }

  if (log_is_enabled(Debug, monitorinflation)) {
    // exit_globals()'s call to audit_and_print_stats() is done
    // at the Info level and not at a safepoint.
    // For async deflation, audit_and_print_stats() is called in
    // ObjectSynchronizer::do_safepoint_work() at the Debug level
    // at a safepoint.
2790 ObjectSynchronizer::audit_and_print_stats(false /* on_exit */); 2791 } else if (log_is_enabled(Info, monitorinflation)) { 2792 log_info(monitorinflation)("global_population=%d, global_in_use_count=%d, " 2793 "global_free_count=%d, global_wait_count=%d", 2794 Atomic::load(&LVars.population), Atomic::load(&LVars.in_use_count), 2795 Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count)); 2796 } 2797 2798 Atomic::store(&_forceMonitorScavenge, 0); // Reset 2799 2800 OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged)); 2801 OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation)); 2802 2803 GVars.stw_random = os::random(); 2804 GVars.stw_cycle++; 2805 2806 if (needs_special_deflation) { 2807 set_is_special_deflation_requested(false); // special deflation is done 2808 } 2809 } 2810 2811 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) { 2812 assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); 2813 2814 if (AsyncDeflateIdleMonitors && !is_special_deflation_requested()) { 2815 // Nothing to do if a special deflation has NOT been requested. 2816 return; 2817 } 2818 2819 ObjectMonitor* free_head_p = NULL; // Local SLL of scavenged monitors 2820 ObjectMonitor* free_tail_p = NULL; 2821 elapsedTimer timer; 2822 2823 if (log_is_enabled(Info, safepoint, cleanup) || 2824 log_is_enabled(Info, monitorinflation)) { 2825 timer.start(); 2826 } 2827 2828 // Update n_in_circulation before om_in_use_count is updated by deflation. 2829 Atomic::add(&counters->n_in_circulation, thread->om_in_use_count); 2830 2831 int deflated_count = deflate_monitor_list(&thread->om_in_use_list, &thread->om_in_use_count, &free_head_p, &free_tail_p); 2832 Atomic::add(&counters->n_in_use, thread->om_in_use_count); 2833 2834 if (free_head_p != NULL) { 2835 // Move the deflated ObjectMonitors back to the global free list. 2836 // No races on the working list so no need for load_acquire(). 2837 guarantee(free_tail_p != NULL && deflated_count > 0, "invariant"); 2838 assert(free_tail_p->_next_om == NULL, "must be NULL: _next_om=" 2839 INTPTR_FORMAT, p2i(free_tail_p->_next_om)); 2840 prepend_list_to_global_free_list(free_head_p, free_tail_p, deflated_count); 2841 Atomic::add(&counters->n_scavenged, deflated_count); 2842 Atomic::add(&counters->per_thread_scavenged, deflated_count); 2843 } 2844 2845 timer.stop(); 2846 // Safepoint logging cares about cumulative per_thread_times and 2847 // we'll capture most of the cost, but not the muxRelease() which 2848 // should be cheap. 2849 counters->per_thread_times += timer.seconds(); 2850 2851 LogStreamHandle(Debug, monitorinflation) lsh_debug; 2852 LogStreamHandle(Info, monitorinflation) lsh_info; 2853 LogStream* ls = NULL; 2854 if (log_is_enabled(Debug, monitorinflation)) { 2855 ls = &lsh_debug; 2856 } else if (deflated_count != 0 && log_is_enabled(Info, monitorinflation)) { 2857 ls = &lsh_info; 2858 } 2859 if (ls != NULL) { 2860 ls->print_cr("jt=" INTPTR_FORMAT ": deflating per-thread idle monitors, %3.7f secs, %d monitors", p2i(thread), timer.seconds(), deflated_count); 2861 } 2862 } 2863 2864 // Monitor cleanup on JavaThread::exit 2865 2866 // Iterate through monitor cache and attempt to release thread's monitors 2867 // Gives up on a particular monitor if an exception occurs, but continues 2868 // the overall iteration, swallowing the exception. 
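// Usage sketch (this mirrors release_monitors_owned_by_thread() below):
//   ReleaseJavaMonitorsClosure rjmc(THREAD);
//   ObjectSynchronizer::monitors_iterate(&rjmc);
//   THREAD->clear_pending_exception();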
2869 class ReleaseJavaMonitorsClosure: public MonitorClosure {
2870  private:
2871   TRAPS;
2872
2873  public:
2874   ReleaseJavaMonitorsClosure(Thread* thread) : THREAD(thread) {}
2875   void do_monitor(ObjectMonitor* mid) {
2876     if (mid->owner() == THREAD) {
2877       (void)mid->complete_exit(CHECK);
2878     }
2879   }
2880 };
2881
2882 // Release all inflated monitors owned by THREAD. Lightweight monitors are
2883 // ignored. This is meant to be called during JNI thread detach, which assumes
2884 // all remaining monitors are heavyweight. All exceptions are swallowed.
2885 // Scanning the extant monitor list can be time-consuming.
2886 // A simple optimization is to add a per-thread flag that indicates a thread
2887 // called jni_monitorenter() during its lifetime.
2888 //
2889 // Instead of NoSafepointVerifier it might be cheaper to
2890 // use an idiom of the form:
2891 //   auto int tmp = SafepointSynchronize::_safepoint_counter ;
2892 //   <code that must not run at safepoint>
2893 //   guarantee (((tmp ^ _safepoint_counter) | (tmp & 1)) == 0) ;
2894 // Since the tests are extremely cheap we could leave them enabled
2895 // for normal product builds.
2896
2897 void ObjectSynchronizer::release_monitors_owned_by_thread(TRAPS) {
2898   assert(THREAD == JavaThread::current(), "must be current Java thread");
2899   NoSafepointVerifier nsv;
2900   ReleaseJavaMonitorsClosure rjmc(THREAD);
2901   ObjectSynchronizer::monitors_iterate(&rjmc);
2902   THREAD->clear_pending_exception();
2903 }
2904
2905 const char* ObjectSynchronizer::inflate_cause_name(const InflateCause cause) {
2906   switch (cause) {
2907     case inflate_cause_vm_internal: return "VM Internal";
2908     case inflate_cause_monitor_enter: return "Monitor Enter";
2909     case inflate_cause_wait: return "Monitor Wait";
2910     case inflate_cause_notify: return "Monitor Notify";
2911     case inflate_cause_hash_code: return "Monitor Hash Code";
2912     case inflate_cause_jni_enter: return "JNI Monitor Enter";
2913     case inflate_cause_jni_exit: return "JNI Monitor Exit";
2914     default:
2915       ShouldNotReachHere();
2916   }
2917   return "Unknown";
2918 }
2919
2920 //------------------------------------------------------------------------------
2921 // Debugging code
2922
2923 u_char* ObjectSynchronizer::get_gvars_addr() {
2924   return (u_char*)&GVars;
2925 }
2926
2927 u_char* ObjectSynchronizer::get_gvars_hc_sequence_addr() {
2928   return (u_char*)&GVars.hc_sequence;
2929 }
2930
2931 size_t ObjectSynchronizer::get_gvars_size() {
2932   return sizeof(SharedGlobals);
2933 }
2934
2935 u_char* ObjectSynchronizer::get_gvars_stw_random_addr() {
2936   return (u_char*)&GVars.stw_random;
2937 }
2938
2939 void ObjectSynchronizer::audit_and_print_stats(bool on_exit) {
2940   assert(on_exit || SafepointSynchronize::is_at_safepoint(), "invariant");
2941
2942   LogStreamHandle(Debug, monitorinflation) lsh_debug;
2943   LogStreamHandle(Info, monitorinflation) lsh_info;
2944   LogStreamHandle(Trace, monitorinflation) lsh_trace;
2945   LogStream* ls = NULL;
2946   if (log_is_enabled(Trace, monitorinflation)) {
2947     ls = &lsh_trace;
2948   } else if (log_is_enabled(Debug, monitorinflation)) {
2949     ls = &lsh_debug;
2950   } else if (log_is_enabled(Info, monitorinflation)) {
2951     ls = &lsh_info;
2952   }
2953   assert(ls != NULL, "sanity check");
2954
2955   // Log counts for the global and per-thread monitor lists:
2956   int chk_om_population = log_monitor_list_counts(ls);
2957   int error_cnt = 0;
2958
2959   ls->print_cr("Checking global lists:");
2960
2961   // Check LVars.population:
2962   if (Atomic::load(&LVars.population) ==
chk_om_population) { 2963 ls->print_cr("global_population=%d equals chk_om_population=%d", 2964 Atomic::load(&LVars.population), chk_om_population); 2965 } else { 2966 // With lock free access to the monitor lists, it is possible for 2967 // log_monitor_list_counts() to return a value that doesn't match 2968 // LVars.population. So far a higher value has been seen in testing 2969 // so something is being double counted by log_monitor_list_counts(). 2970 ls->print_cr("WARNING: global_population=%d is not equal to " 2971 "chk_om_population=%d", Atomic::load(&LVars.population), chk_om_population); 2972 } 2973 2974 // Check LVars.in_use_list and LVars.in_use_count: 2975 chk_global_in_use_list_and_count(ls, &error_cnt); 2976 2977 // Check LVars.free_list and LVars.free_count: 2978 chk_global_free_list_and_count(ls, &error_cnt); 2979 2980 if (HandshakeAfterDeflateIdleMonitors) { 2981 // Check LVars.wait_list and LVars.wait_count: 2982 chk_global_wait_list_and_count(ls, &error_cnt); 2983 } 2984 2985 ls->print_cr("Checking per-thread lists:"); 2986 2987 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) { 2988 // Check om_in_use_list and om_in_use_count: 2989 chk_per_thread_in_use_list_and_count(jt, ls, &error_cnt); 2990 2991 // Check om_free_list and om_free_count: 2992 chk_per_thread_free_list_and_count(jt, ls, &error_cnt); 2993 } 2994 2995 if (error_cnt == 0) { 2996 ls->print_cr("No errors found in monitor list checks."); 2997 } else { 2998 log_error(monitorinflation)("found monitor list errors: error_cnt=%d", error_cnt); 2999 } 3000 3001 if ((on_exit && log_is_enabled(Info, monitorinflation)) || 3002 (!on_exit && log_is_enabled(Trace, monitorinflation))) { 3003 // When exiting this log output is at the Info level. When called 3004 // at a safepoint, this log output is at the Trace level since 3005 // there can be a lot of it. 3006 log_in_use_monitor_details(ls); 3007 } 3008 3009 ls->flush(); 3010 3011 guarantee(error_cnt == 0, "ERROR: found monitor list errors: error_cnt=%d", error_cnt); 3012 } 3013 3014 // Check a free monitor entry; log any errors. 
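// When one of the checks below fails, the output has the following
// shape (addresses elided in this illustration):
//   ERROR: monitor=0x...: free global monitor must not be busy: ...
//   ERROR: jt=0x..., monitor=0x...: free per-thread monitor must have
//   NULL _object field: _object=0x...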
3015 void ObjectSynchronizer::chk_free_entry(JavaThread* jt, ObjectMonitor* n, 3016 outputStream * out, int *error_cnt_p) { 3017 stringStream ss; 3018 if (n->is_busy()) { 3019 if (jt != NULL) { 3020 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 3021 ": free per-thread monitor must not be busy: %s", p2i(jt), 3022 p2i(n), n->is_busy_to_string(&ss)); 3023 } else { 3024 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " 3025 "must not be busy: %s", p2i(n), n->is_busy_to_string(&ss)); 3026 } 3027 *error_cnt_p = *error_cnt_p + 1; 3028 } 3029 if (n->header().value() != 0) { 3030 if (jt != NULL) { 3031 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 3032 ": free per-thread monitor must have NULL _header " 3033 "field: _header=" INTPTR_FORMAT, p2i(jt), p2i(n), 3034 n->header().value()); 3035 *error_cnt_p = *error_cnt_p + 1; 3036 } else if (!AsyncDeflateIdleMonitors) { 3037 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " 3038 "must have NULL _header field: _header=" INTPTR_FORMAT, 3039 p2i(n), n->header().value()); 3040 *error_cnt_p = *error_cnt_p + 1; 3041 } 3042 } 3043 if (n->object() != NULL) { 3044 if (jt != NULL) { 3045 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 3046 ": free per-thread monitor must have NULL _object " 3047 "field: _object=" INTPTR_FORMAT, p2i(jt), p2i(n), 3048 p2i(n->object())); 3049 } else { 3050 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": free global monitor " 3051 "must have NULL _object field: _object=" INTPTR_FORMAT, 3052 p2i(n), p2i(n->object())); 3053 } 3054 *error_cnt_p = *error_cnt_p + 1; 3055 } 3056 } 3057 3058 // Lock the next ObjectMonitor for traversal. The current ObjectMonitor 3059 // is unlocked after the next ObjectMonitor is locked. *cur_p and *next_p 3060 // are updated to their next values in the list traversal. *cur_p is set 3061 // to NULL when the end of the list is reached. 3062 static void lock_next_for_traversal(ObjectMonitor** cur_p, ObjectMonitor** next_p) { 3063 ObjectMonitor* prev = *cur_p; // Save current for unlocking. 3064 if (*next_p == NULL) { // Reached the end of the list. 3065 om_unlock(prev); // Unlock previous. 3066 *cur_p = NULL; // Tell the caller we are done. 3067 return; 3068 } 3069 om_lock(*next_p); // Lock next. 3070 om_unlock(prev); // Unlock previous. 3071 *cur_p = *next_p; // Update current. 3072 *next_p = unmarked_next(*cur_p); // Update next. 3073 } 3074 3075 // Check the global free list and count; log the results of the checks. 3076 void ObjectSynchronizer::chk_global_free_list_and_count(outputStream * out, 3077 int *error_cnt_p) { 3078 int chk_om_free_count = 0; 3079 ObjectMonitor* cur = NULL; 3080 ObjectMonitor* next = NULL; 3081 if ((cur = get_list_head_locked(&LVars.free_list)) != NULL) { 3082 next = unmarked_next(cur); 3083 // Marked the global free list head so process the list. 
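// The loop below (like the other list checkers) walks the list
// hand-over-hand: cur is om-locked on entry to each iteration and
// lock_next_for_traversal() locks next before unlocking cur, so a
// concurrent list update cannot unlink the element being examined.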
3084     while (true) {
3085       chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3086       chk_om_free_count++;
3087
3088       lock_next_for_traversal(&cur, &next);
3089       if (cur == NULL) {
3090         break;
3091       }
3092     }
3093   }
3094   if (Atomic::load(&LVars.free_count) == chk_om_free_count) {
3095     out->print_cr("global_free_count=%d equals chk_om_free_count=%d",
3096                   Atomic::load(&LVars.free_count), chk_om_free_count);
3097   } else {
3098     // With lock free access to LVars.free_list, it is possible for an
3099     // ObjectMonitor to be prepended to LVars.free_list after we started
3100     // calculating chk_om_free_count, so LVars.free_count may not
3101     // match anymore.
3102     out->print_cr("WARNING: global_free_count=%d is not equal to "
3103                   "chk_om_free_count=%d", Atomic::load(&LVars.free_count), chk_om_free_count);
3104   }
3105 }
3106
3107 // Check the global wait list and count; log the results of the checks.
3108 void ObjectSynchronizer::chk_global_wait_list_and_count(outputStream * out,
3109                                                         int *error_cnt_p) {
3110   int chk_om_wait_count = 0;
3111   ObjectMonitor* cur = NULL;
3112   ObjectMonitor* next = NULL;
3113   if ((cur = get_list_head_locked(&LVars.wait_list)) != NULL) {
3114     next = unmarked_next(cur);
3115     // Marked the global wait list head so process the list.
3116     while (true) {
3117       // Rules for LVars.wait_list are the same as for LVars.free_list:
3118       chk_free_entry(NULL /* jt */, cur, out, error_cnt_p);
3119       chk_om_wait_count++;
3120
3121       lock_next_for_traversal(&cur, &next);
3122       if (cur == NULL) {
3123         break;
3124       }
3125     }
3126   }
3127   if (Atomic::load(&LVars.wait_count) == chk_om_wait_count) {
3128     out->print_cr("global_wait_count=%d equals chk_om_wait_count=%d",
3129                   Atomic::load(&LVars.wait_count), chk_om_wait_count);
3130   } else {
3131     out->print_cr("ERROR: global_wait_count=%d is not equal to "
3132                   "chk_om_wait_count=%d", Atomic::load(&LVars.wait_count), chk_om_wait_count);
3133     *error_cnt_p = *error_cnt_p + 1;
3134   }
3135 }
3136
3137 // Check the global in-use list and count; log the results of the checks.
3138 void ObjectSynchronizer::chk_global_in_use_list_and_count(outputStream * out,
3139                                                           int *error_cnt_p) {
3140   int chk_om_in_use_count = 0;
3141   ObjectMonitor* cur = NULL;
3142   ObjectMonitor* next = NULL;
3143   if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) {
3144     next = unmarked_next(cur);
3145     // Marked the global in-use list head so process the list.
3146     while (true) {
3147       chk_in_use_entry(NULL /* jt */, cur, out, error_cnt_p);
3148       chk_om_in_use_count++;
3149
3150       lock_next_for_traversal(&cur, &next);
3151       if (cur == NULL) {
3152         break;
3153       }
3154     }
3155   }
3156   if (Atomic::load(&LVars.in_use_count) == chk_om_in_use_count) {
3157     out->print_cr("global_in_use_count=%d equals chk_om_in_use_count=%d",
3158                   Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
3159   } else {
3160     // With lock free access to the monitor lists, it is possible for
3161     // an exiting JavaThread to put its in-use ObjectMonitors on the
3162     // global in-use list after chk_om_in_use_count is calculated above.
3163     out->print_cr("WARNING: global_in_use_count=%d is not equal to chk_om_in_use_count=%d",
3164                   Atomic::load(&LVars.in_use_count), chk_om_in_use_count);
3165   }
3166 }
3167
3168 // Check an in-use monitor entry; log any errors.
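// In rough terms, with obj = (oop)n->object(), an in-use ObjectMonitor n
// must satisfy all of (each violation below bumps *error_cnt_p):
//   n->header().value() != 0     // non-NULL _header field
//   n->object() != NULL          // an associated object
//   obj->mark().has_monitor()    // the object's mark word is inflated
//   obj->mark().monitor() == n   // and it refers back to this monitor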
3169 void ObjectSynchronizer::chk_in_use_entry(JavaThread* jt, ObjectMonitor* n, 3170 outputStream * out, int *error_cnt_p) { 3171 if (n->header().value() == 0) { 3172 if (jt != NULL) { 3173 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 3174 ": in-use per-thread monitor must have non-NULL _header " 3175 "field.", p2i(jt), p2i(n)); 3176 } else { 3177 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor " 3178 "must have non-NULL _header field.", p2i(n)); 3179 } 3180 *error_cnt_p = *error_cnt_p + 1; 3181 } 3182 if (n->object() == NULL) { 3183 if (jt != NULL) { 3184 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 3185 ": in-use per-thread monitor must have non-NULL _object " 3186 "field.", p2i(jt), p2i(n)); 3187 } else { 3188 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global monitor " 3189 "must have non-NULL _object field.", p2i(n)); 3190 } 3191 *error_cnt_p = *error_cnt_p + 1; 3192 } 3193 const oop obj = (oop)n->object(); 3194 const markWord mark = obj->mark(); 3195 if (!mark.has_monitor()) { 3196 if (jt != NULL) { 3197 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 3198 ": in-use per-thread monitor's object does not think " 3199 "it has a monitor: obj=" INTPTR_FORMAT ", mark=" 3200 INTPTR_FORMAT, p2i(jt), p2i(n), p2i(obj), mark.value()); 3201 } else { 3202 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " 3203 "monitor's object does not think it has a monitor: obj=" 3204 INTPTR_FORMAT ", mark=" INTPTR_FORMAT, p2i(n), 3205 p2i(obj), mark.value()); 3206 } 3207 *error_cnt_p = *error_cnt_p + 1; 3208 } 3209 ObjectMonitor* const obj_mon = mark.monitor(); 3210 if (n != obj_mon) { 3211 if (jt != NULL) { 3212 out->print_cr("ERROR: jt=" INTPTR_FORMAT ", monitor=" INTPTR_FORMAT 3213 ": in-use per-thread monitor's object does not refer " 3214 "to the same monitor: obj=" INTPTR_FORMAT ", mark=" 3215 INTPTR_FORMAT ", obj_mon=" INTPTR_FORMAT, p2i(jt), 3216 p2i(n), p2i(obj), mark.value(), p2i(obj_mon)); 3217 } else { 3218 out->print_cr("ERROR: monitor=" INTPTR_FORMAT ": in-use global " 3219 "monitor's object does not refer to the same monitor: obj=" 3220 INTPTR_FORMAT ", mark=" INTPTR_FORMAT ", obj_mon=" 3221 INTPTR_FORMAT, p2i(n), p2i(obj), mark.value(), p2i(obj_mon)); 3222 } 3223 *error_cnt_p = *error_cnt_p + 1; 3224 } 3225 } 3226 3227 // Check the thread's free list and count; log the results of the checks. 3228 void ObjectSynchronizer::chk_per_thread_free_list_and_count(JavaThread *jt, 3229 outputStream * out, 3230 int *error_cnt_p) { 3231 int chk_om_free_count = 0; 3232 ObjectMonitor* cur = NULL; 3233 ObjectMonitor* next = NULL; 3234 if ((cur = get_list_head_locked(&jt->om_free_list)) != NULL) { 3235 next = unmarked_next(cur); 3236 // Marked the per-thread free list head so process the list. 
3237 while (true) { 3238 chk_free_entry(jt, cur, out, error_cnt_p); 3239 chk_om_free_count++; 3240 3241 lock_next_for_traversal(&cur, &next); 3242 if (cur == NULL) { 3243 break; 3244 } 3245 } 3246 } 3247 if (jt->om_free_count == chk_om_free_count) { 3248 out->print_cr("jt=" INTPTR_FORMAT ": om_free_count=%d equals " 3249 "chk_om_free_count=%d", p2i(jt), jt->om_free_count, 3250 chk_om_free_count); 3251 } else { 3252 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_free_count=%d is not " 3253 "equal to chk_om_free_count=%d", p2i(jt), jt->om_free_count, 3254 chk_om_free_count); 3255 *error_cnt_p = *error_cnt_p + 1; 3256 } 3257 } 3258 3259 // Check the thread's in-use list and count; log the results of the checks. 3260 void ObjectSynchronizer::chk_per_thread_in_use_list_and_count(JavaThread *jt, 3261 outputStream * out, 3262 int *error_cnt_p) { 3263 int chk_om_in_use_count = 0; 3264 ObjectMonitor* cur = NULL; 3265 ObjectMonitor* next = NULL; 3266 if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) { 3267 next = unmarked_next(cur); 3268 // Marked the per-thread in-use list head so process the list. 3269 while (true) { 3270 chk_in_use_entry(jt, cur, out, error_cnt_p); 3271 chk_om_in_use_count++; 3272 3273 lock_next_for_traversal(&cur, &next); 3274 if (cur == NULL) { 3275 break; 3276 } 3277 } 3278 } 3279 if (jt->om_in_use_count == chk_om_in_use_count) { 3280 out->print_cr("jt=" INTPTR_FORMAT ": om_in_use_count=%d equals " 3281 "chk_om_in_use_count=%d", p2i(jt), 3282 jt->om_in_use_count, chk_om_in_use_count); 3283 } else { 3284 out->print_cr("ERROR: jt=" INTPTR_FORMAT ": om_in_use_count=%d is not " 3285 "equal to chk_om_in_use_count=%d", p2i(jt), 3286 jt->om_in_use_count, chk_om_in_use_count); 3287 *error_cnt_p = *error_cnt_p + 1; 3288 } 3289 } 3290 3291 // Log details about ObjectMonitors on the in-use lists. The 'BHL' 3292 // flags indicate why the entry is in-use, 'object' and 'object type' 3293 // indicate the associated object and its type. 3294 void ObjectSynchronizer::log_in_use_monitor_details(outputStream * out) { 3295 stringStream ss; 3296 if (Atomic::load(&LVars.in_use_count) > 0) { 3297 out->print_cr("In-use global monitor info:"); 3298 out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)"); 3299 out->print_cr("%18s %s %7s %18s %18s", 3300 "monitor", "BHL", "ref_cnt", "object", "object type"); 3301 out->print_cr("================== === ======= ================== =================="); 3302 ObjectMonitor* cur = NULL; 3303 ObjectMonitor* next = NULL; 3304 if ((cur = get_list_head_locked(&LVars.in_use_list)) != NULL) { 3305 next = unmarked_next(cur); 3306 // Marked the global in-use list head so process the list. 
3307       while (true) {
3308         const oop obj = (oop) cur->object();
3309         const markWord mark = cur->header();
3310         ResourceMark rm;
3311         out->print(INTPTR_FORMAT " %d%d%d %7d " INTPTR_FORMAT " %s",
3312                    p2i(cur), cur->is_busy() != 0, mark.hash() != 0,
3313                    cur->owner() != NULL, (int)cur->ref_count(), p2i(obj),
3314                    obj->klass()->external_name());
3315         if (cur->is_busy() != 0) {
3316           out->print(" (%s)", cur->is_busy_to_string(&ss));
3317           ss.reset();
3318         }
3319         out->cr();
3320
3321         lock_next_for_traversal(&cur, &next);
3322         if (cur == NULL) {
3323           break;
3324         }
3325       }
3326     }
3327   }
3328
3329   out->print_cr("In-use per-thread monitor info:");
3330   out->print_cr("(B -> is_busy, H -> has hash code, L -> lock status)");
3331   out->print_cr("%18s %18s %s %7s %18s %18s",
3332                 "jt", "monitor", "BHL", "ref_cnt", "object", "object type");
3333   out->print_cr("================== ================== === ======= ================== ==================");
3334   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3335     ObjectMonitor* cur = NULL;
3336     ObjectMonitor* next = NULL;
3337     if ((cur = get_list_head_locked(&jt->om_in_use_list)) != NULL) {
3338       next = unmarked_next(cur);
3339       // Marked the per-thread in-use list head so process the list.
3340       while (true) {
3341         const oop obj = (oop) cur->object();
3342         const markWord mark = cur->header();
3343         ResourceMark rm;
3344         out->print(INTPTR_FORMAT " " INTPTR_FORMAT " %d%d%d %7d "
3345                    INTPTR_FORMAT " %s", p2i(jt), p2i(cur), cur->is_busy() != 0,
3346                    mark.hash() != 0, cur->owner() != NULL, (int)cur->ref_count(),
3347                    p2i(obj), obj->klass()->external_name());
3348         if (cur->is_busy() != 0) {
3349           out->print(" (%s)", cur->is_busy_to_string(&ss));
3350           ss.reset();
3351         }
3352         out->cr();
3353
3354         lock_next_for_traversal(&cur, &next);
3355         if (cur == NULL) {
3356           break;
3357         }
3358       }
3359     }
3360   }
3361
3362   out->flush();
3363 }
3364
3365 // Log counts for the global and per-thread monitor lists and return
3366 // the population count.
3367 int ObjectSynchronizer::log_monitor_list_counts(outputStream * out) {
3368   int pop_count = 0;
3369   out->print_cr("%18s %10s %10s %10s %10s",
3370                 "Global Lists:", "InUse", "Free", "Wait", "Total");
3371   out->print_cr("================== ========== ========== ========== ==========");
3372   out->print_cr("%18s %10d %10d %10d %10d", "", Atomic::load(&LVars.in_use_count),
3373                 Atomic::load(&LVars.free_count), Atomic::load(&LVars.wait_count), Atomic::load(&LVars.population));
3374   pop_count += Atomic::load(&LVars.in_use_count) + Atomic::load(&LVars.free_count);
3375   if (HandshakeAfterDeflateIdleMonitors) {
3376     pop_count += Atomic::load(&LVars.wait_count);
3377   }
3378
3379   out->print_cr("%18s %10s %10s %10s",
3380                 "Per-Thread Lists:", "InUse", "Free", "Provision");
3381   out->print_cr("================== ========== ========== ==========");
3382
3383   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *jt = jtiwh.next(); ) {
3384     out->print_cr(INTPTR_FORMAT " %10d %10d %10d", p2i(jt),
3385                   jt->om_in_use_count, jt->om_free_count, jt->om_free_provision);
3386     pop_count += jt->om_in_use_count + jt->om_free_count;
3387   }
3388   return pop_count;
3389 }
3390
3391 #ifndef PRODUCT
3392
3393 // Check if monitor belongs to the monitor cache.
3394 // The list is grow-only so it's *relatively* safe to traverse
3395 // the list of extant blocks without taking a lock.
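// For example, a monitor m inside some block b counts as "in the pool"
// when &b[0] < m && m < &b[_BLOCKSIZE] and (address)m - (address)b is a
// multiple of sizeof(PaddedObjectMonitor); b[0] itself is the
// CHAINMARKER block header and is never handed out as a monitor.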
3396
3397 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
3398   PaddedObjectMonitor* block = Atomic::load(&g_block_list);
3399   while (block != NULL) {
3400     assert(block->object() == CHAINMARKER, "must be a block header");
3401     if (monitor > &block[0] && monitor < &block[_BLOCKSIZE]) {
3402       address mon = (address)monitor;
3403       address blk = (address)block;
3404       size_t diff = mon - blk;
3405       assert((diff % sizeof(PaddedObjectMonitor)) == 0, "must be aligned");
3406       return 1;
3407     }
3408     // unmarked_next() is not needed with g_block_list (no locking
3409     // used with block linkage _next_om fields).
3410     block = (PaddedObjectMonitor*)Atomic::load(&block->_next_om);
3411   }
3412   return 0;
3413 }
3414
3415 #endif