
src/hotspot/share/runtime/objectMonitor.cpp

rev 56634 : imported patch 8230876.patch
rev 56635 : v2.00 -> v2.05 (CR5/v2.05/8-for-jdk13) patches combined into one; merge with 8229212.patch; merge with jdk-14+11; merge with 8230184.patch; merge with 8230876.patch; merge with jdk-14+15; merge with jdk-14+18.
rev 56637 : Add OM_CACHE_LINE_SIZE so that ObjectMonitor cache line sizes can be experimented with independently of DEFAULT_CACHE_LINE_SIZE; for SPARC and X64 configs that use 128 for DEFAULT_CACHE_LINE_SIZE, we are experimenting with 64; move _previous_owner_tid and _allocation_state fields to share the cache line with ObjectMonitor::_header; put ObjectMonitor::_ref_count on its own cache line after _owner; add 'int* count_p' parameter to deflate_monitor_list() and deflate_monitor_list_using_JT() and push counter updates down to where the ObjectMonitors are actually removed from the in-use lists; monitors_iterate() async deflation check should use negative ref_count; add 'JavaThread* target' param to deflate_per_thread_idle_monitors_using_JT() add deflate_common_idle_monitors_using_JT() to make it clear which JavaThread* is the target of the work and which is the calling JavaThread* (self); g_free_list, g_om_in_use_list and g_om_in_use_count are now static to synchronizer.cpp (reduce scope); add more diagnostic info to some assert()'s; minor code cleanups and code motion; save_om_ptr() should detect a race with a deflating thread that is bailing out and cause a retry when the ref_count field is not positive; merge with jdk-14+11; add special GC support for TestHumongousClassLoader.java; merge with 8230184.patch; merge with jdk-14+14; merge with jdk-14+18.
rev 56639 : loosen a couple more counter checks due to races observed in testing; simplify om_release() extraction of mid since list head or cur_mid_in_use is marked; simplify deflate_monitor_list() extraction of mid since there are no parallel deleters due to the safepoint; simplify deflate_monitor_list_using_JT() extraction of mid since list head or cur_mid_in_use is marked; prepend_block_to_lists() - simplify based on David H's comments; does not need load_acquire() or release_store() because of the cmpxchg(); prepend_to_common() - simplify to use mark_next_loop() for m and use mark_list_head() and release_store() for the non-empty list case; add more debugging for "Non-balanced monitor enter/exit" failure mode; fix race in inflate() in the "CASE: neutral" code path; install_displaced_markword_in_object() does not need to clear the header field since that is handled when the ObjectMonitor is moved from the global free list; LSuccess should clear boxReg to set ICC.ZF=1 to avoid depending on existing boxReg contents; update fast_unlock() to detect when object no longer refers to the same ObjectMonitor and take fast path exit instead; clarify fast_lock() code where we detect when object no longer refers to the same ObjectMonitor; add/update comments for movptr() calls where we move a literal into an Address; remove set_owner(); refactor setting of owner field into set_owner_from(2 versions), set_owner_from_BasicLock(), and try_set_owner_from(); the new functions include monitorinflation+owner logging; extract debug code from v2.06 and v2.07 and move to v2.07.debug; change 'jccb' -> 'jcc' and 'jmpb' -> 'jmp' as needed; checkpoint initial version of MacroAssembler::inc_om_ref_count(); update LP64 MacroAssembler::fast_lock() and fast_unlock() to use inc_om_ref_count(); fast_lock() return flag setting logic can use 'testptr(tmpReg, tmpReg)' instead of 'cmpptr(tmpReg, 0)' since that's more efficient; fast_unlock() LSuccess return flag setting logic can use 'testl (boxReg, 0)' instead of 'xorptr(boxReg, boxReg)' since that's more efficient; cleanup "fast-path" vs "fast path" and "slow-path" vs "slow path"; update MacroAssembler::rtm_inflated_locking() to use inc_om_ref_count(); update MacroAssembler::fast_lock() to preserve the flags before decrementing ref_count and restore the flags afterwards; this is more clean than depending on the contents of rax/tmpReg; coleenp CR - refactor async monitor deflation work from ServiceThread::service_thread_entry() to ObjectSynchronizer::deflate_idle_monitors_using_JT(); rehn,eosterlund CR - add support for HandshakeAfterDeflateIdleMonitors for platforms that don't have ObjectMonitor ref_count support implemented in C2 fast_lock() and fast_unlock().


 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {
 242   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 243 
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void * cur = Atomic::cmpxchg(Self, &_owner, (void*)NULL);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned ((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     // Commute owner from a thread-specific on-stack BasicLockObject address to
 264     // a full-fledged "Thread *".
 265     _owner = Self;
 266     return;
 267   }
 268 
 269   if (AsyncDeflateIdleMonitors &&
 270       Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 271     // The deflation protocol finished the first part (setting owner),
 272     // but it failed the second part (making ref_count negative) and
 273     // bailed. Or the ObjectMonitor was async deflated and reused.
 274     // Acquired the monitor.
 275     assert(_recursions == 0, "invariant");
 276     return;
 277   }
 278 
 279   // We've encountered genuine contention.
 280   assert(Self->_Stalled == 0, "invariant");
 281   Self->_Stalled = intptr_t(this);
 282 
 283   // Try one round of spinning *before* enqueueing Self
 284   // and before going through the awkward and expensive state
 285   // transitions.  The following spin is strictly optional ...
 286   // Note that if we acquire the monitor from an initial spin
 287   // we forgo posting JVMTI events and firing DTRACE probes.
 288   if (TrySpin(Self) > 0) {
 289     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 290     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);


 398 
 399     // The current thread already owns the monitor and is not going to
 400     // call park() for the remainder of the monitor enter protocol. So
 401     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 402     // event handler consumed an unpark() issued by the thread that
 403     // just exited the monitor.
 404   }
 405   if (event.should_commit()) {
 406     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 407     event.commit();
 408   }
 409   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 410 }
 411 
 412 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 413 // Callers must compensate as needed.
 414 
 415 int ObjectMonitor::TryLock(Thread * Self) {
 416   void * own = _owner;
 417   if (own != NULL) return 0;
 418   if (Atomic::replace_if_null(Self, &_owner)) {
 419     assert(_recursions == 0, "invariant");
 420     return 1;
 421   }
 422   // The lock had been free momentarily, but we lost the race to the lock.
 423   // Interference -- the CAS failed.
 424   // We can either return -1 or retry.
 425   // Retry doesn't make as much sense because the lock was just acquired.
 426   return -1;
 427 }
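Because a failed TryLock() is not serializing, callers cannot treat 0 or -1 as an up-to-date view of the monitor; the retry loops in EnterI() and ReenterI() below are the compensation the caveat refers to. A minimal caller sketch of the tri-state result (1 = acquired, 0 = observed held, -1 = lost the CAS race); illustration only, not part of this file:

    int rc = TryLock(Self);
    if (rc > 0) {
      // Acquired: _owner == Self; enter the critical section.
    } else {
      // rc == 0 (owner was non-NULL) or rc == -1 (CAS lost a race).
      // Neither outcome is serializing: re-sample _owner and spin or park.
    }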
 428 
 429 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 430 // into the header of the object associated with the monitor. This
 431 // idempotent method is called by a thread that is deflating a
 432 // monitor and by other threads that have detected a race with the
 433 // deflation process.
 434 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 435   // This function must only be called when (owner == DEFLATER_MARKER
 436   // && ref_count <= 0), but we can't guarantee that here because
 437   // those values could change when the ObjectMonitor gets moved from
 438   // the global free list to a per-thread free list.
 439 
 440   guarantee(obj != NULL, "must be non-NULL");
 441   if (object() != obj) {
 442     // ObjectMonitor's object ref no longer refers to the target object
 443     // so the object's header has already been restored.
 444     return;
 445   }
 446 
 447   markWord dmw = header();
 448   if (dmw.value() == 0) {
 449     // ObjectMonitor's header/dmw has been cleared so the object's
 450     // header has already been restored.
 451     return;
 452   }
 453 
 454   // A non-NULL dmw has to be either neutral (not locked and not marked)
 455   // or already participating in this restoration protocol.
 456   assert(dmw.is_neutral() || (dmw.is_marked() && dmw.hash() == 0),
 457          "failed precondition: dmw=" INTPTR_FORMAT, dmw.value());
 458 
 459   markWord marked_dmw = markWord::zero();
 460   if (!dmw.is_marked() && dmw.hash() == 0) {
 461     // This dmw has not yet started the restoration protocol so we
 462     // mark a copy of the dmw to begin the protocol.
 463     // Note: A dmw with a hashcode does not take this code path.
 464     marked_dmw = dmw.set_marked();
 465 
 466     // All of the callers to this function can be racing with each
 467     // other trying to update the _header field.
 468     dmw = (markWord) Atomic::cmpxchg(marked_dmw, &_header, dmw);
 469     if (dmw.value() == 0) {
 470       // ObjectMonitor's header/dmw has been cleared so the object's


 480   if (dmw.is_marked()) {
 481     // Clear the mark from the header/dmw copy in preparation for
 482     // possible restoration from this thread.
 483     assert(dmw.hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 484            dmw.value());
 485     dmw = dmw.set_unmarked();
 486   }
 487   assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
 488 
 489   // Install displaced mark word if the object's header still points
 490   // to this ObjectMonitor. All racing callers to this function will
 491   // reach this point, but only one can win.
 492   obj->cas_set_mark(dmw, markWord::encode(this));
 493 
 494   // Note: It does not matter which thread restored the header/dmw
 495   // into the object's header. The thread deflating the monitor just
 496   // wanted the object's header restored and it is. The threads that
 497   // detected a race with the deflation process also wanted the
 498   // object's header restored before they retry their operation and
 499   // because it is restored they will only retry once.
 500 
 501   if (marked_dmw.value() != 0) {
 502     // Clear _header to NULL if it is still marked_dmw so a racing
 503     // install_displaced_markword_in_object() can bail out sooner.
 504     Atomic::cmpxchg(markWord::zero(), &_header, marked_dmw);
 505   }
 506 }
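Condensed, the restoration protocol above is three CAS steps. A sketch of the happy path using the names from this function, eliding the zero-header bailouts and the already-marked and hashcode cases:

    markWord dmw = header();                                // read the displaced mark word (dmw)
    markWord marked_dmw = dmw.set_marked();                 // step 1: publish a marked copy ...
    dmw = (markWord) Atomic::cmpxchg(marked_dmw, &_header, dmw);   // ... so racing callers converge
    obj->cas_set_mark(dmw.set_unmarked(), markWord::encode(this)); // step 2: restore object header
    Atomic::cmpxchg(markWord::zero(), &_header, marked_dmw);       // step 3: let racers bail early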
 507 
 508 // Convert the fields used by is_busy() to a string that can be
 509 // used for diagnostic output.
 510 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 511   ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
 512   if (!AsyncDeflateIdleMonitors) {
 513     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 514   } else if (_owner != DEFLATER_MARKER) {
 515     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 516   } else {
 517     ss->print("owner=" INTPTR_FORMAT, p2i(NULL));
 518   }
 519   ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
 520             p2i(_EntryList));
 521   return ss->base();
 522 }
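A typical diagnostic call site, with a caller-owned stringStream so the returned buffer stays alive (hypothetical sketch; 'mid' is an arbitrary monitor pointer):

    stringStream ss;
    log_trace(monitorinflation)("skipping busy monitor: %s", mid->is_busy_to_string(&ss));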
 523 
 524 #define MAX_RECHECK_INTERVAL 1000
 525 
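For the _Responsible thread in EnterI() below, the park() timeout grows by a factor of 8 per futile wakeup and is clamped at MAX_RECHECK_INTERVAL. Assuming the interval starts at 1 (the initial value is assigned earlier in EnterI and is outside this excerpt), the successive timeouts are:

    recheckInterval: 1 -> 8 -> 64 -> 512 -> 1000 (clamped) -> 1000 -> ...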
 526 void ObjectMonitor::EnterI(TRAPS) {
 527   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 528 
 529   Thread * const Self = THREAD;
 530   assert(Self->is_Java_thread(), "invariant");
 531   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 532 
 533   // Try the lock - TATAS
 534   if (TryLock (Self) > 0) {
 535     assert(_succ != Self, "invariant");
 536     assert(_owner == Self, "invariant");
 537     assert(_Responsible != Self, "invariant");
 538     return;
 539   }
 540 
 541   if (AsyncDeflateIdleMonitors &&
 542       Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 543     // The deflation protocol finished the first part (setting owner),
 544     // but it failed the second part (making ref_count negative) and
 545     // bailed. Or the ObjectMonitor was async deflated and reused.
 546     // Acquired the monitor.
 547     assert(_succ != Self, "invariant");
 548     assert(_Responsible != Self, "invariant");
 549     return;
 550   }
 551 
 552   assert(InitDone, "Unexpectedly not initialized");
 553 
 554   // We try one round of spinning *before* enqueueing Self.
 555   //
 556   // If the _owner is ready but OFFPROC we could use a YieldTo()
 557   // operation to donate the remainder of this thread's quantum
 558   // to the owner.  This has subtle but beneficial affinity
 559   // effects.
 560 
 561   if (TrySpin(Self) > 0) {
 562     assert(_owner == Self, "invariant");


 649   for (;;) {
 650 
 651     if (TryLock(Self) > 0) break;
 652     assert(_owner != Self, "invariant");
 653 
 654     // park self
 655     if (_Responsible == Self) {
 656       Self->_ParkEvent->park((jlong) recheckInterval);
 657       // Increase the recheckInterval, but clamp the value.
 658       recheckInterval *= 8;
 659       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 660         recheckInterval = MAX_RECHECK_INTERVAL;
 661       }
 662     } else {
 663       Self->_ParkEvent->park();
 664     }
 665 
 666     if (TryLock(Self) > 0) break;
 667 
 668     if (AsyncDeflateIdleMonitors &&
 669         Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 670       // The deflation protocol finished the first part (setting owner),
 671       // but it failed the second part (making ref_count negative) and
 672       // bailed. Or the ObjectMonitor was async deflated and reused.
 673       // Acquired the monitor.
 674       break;
 675     }
 676 
 677     // The lock is still contested.
 678     // Keep a tally of the # of futile wakeups.
 679     // Note that the counter is not protected by a lock or updated by atomics.
 680     // That is by design - we trade "lossy" counters which are exposed to
 681     // races during updates for a lower probe effect.
 682 
 683     // This PerfData object can be used in parallel with a safepoint.
 684     // See the work around in PerfDataManager::destroy().
 685     OM_PERFDATA_OP(FutileWakeups, inc());
 686     ++nWakeups;
 687 
 688     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 689     // We can defer clearing _succ until after the spin completes


 781   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 782 
 783   assert(Self != NULL, "invariant");
 784   assert(SelfNode != NULL, "invariant");
 785   assert(SelfNode->_thread == Self, "invariant");
 786   assert(_waiters > 0, "invariant");
 787   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 788   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 789   JavaThread * jt = (JavaThread *) Self;
 790 
 791   int nWakeups = 0;
 792   for (;;) {
 793     ObjectWaiter::TStates v = SelfNode->TState;
 794     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 795     assert(_owner != Self, "invariant");
 796 
 797     if (TryLock(Self) > 0) break;
 798     if (TrySpin(Self) > 0) break;
 799 
 800     if (AsyncDeflateIdleMonitors &&
 801         Atomic::cmpxchg(Self, &_owner, DEFLATER_MARKER) == DEFLATER_MARKER) {
 802       // The deflation protocol finished the first part (setting owner),
 803       // but it failed the second part (making ref_count negative) and
 804       // bailed. Or the ObjectMonitor was async deflated and reused.
 805       // Acquired the monitor.
 806       break;
 807     }
 808 
 809     // State transition wrappers around park() ...
 810     // ReenterI() wisely defers state transitions until
 811     // it's clear we must park the thread.
 812     {
 813       OSThreadContendState osts(Self->osthread());
 814       ThreadBlockInVM tbivm(jt);
 815 
 816       // cleared by handle_special_suspend_equivalent_condition()
 817       // or java_suspend_self()
 818       jt->set_suspend_equivalent();
 819       Self->_ParkEvent->park();
 820 
 821       // were we externally suspended while we were waiting?


 978 // the timer expires.  If the lock is high traffic then the stranding latency
 979 // will be low due to (a).  If the lock is low traffic then the odds of
 980 // stranding are lower, although the worst-case stranding latency
 981 // is longer.  Critically, we don't want to put excessive load on the
 982 // platform's timer subsystem.  We want to minimize both the timer injection
 983 // rate (timers created/sec) as well as the number of timers active at
 984 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 985 // the integral of the # of active timers at any instant over time).
 986 // Both impinge on OS scalability.  Given that, at most one thread parked on
 987 // a monitor will use a timer.
 988 //
 989 // There is also the risk of a futile wake-up. If we drop the lock
 990 // another thread can reacquire the lock immediately, and we can
 991 // then wake a thread unnecessarily. This is benign, and we've
 992 // structured the code so the windows are short and the frequency
 993 // of such futile wakeups is low.
 994 
 995 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 996   Thread * const Self = THREAD;
 997   if (THREAD != _owner) {
 998     if (THREAD->is_lock_owned((address) _owner)) {
 999       // Transmute _owner from a BasicLock pointer to a Thread address.
1000       // We don't need to hold _mutex for this transition.
1001       // Non-null to Non-null is safe as long as all readers can
1002       // tolerate either flavor.
1003       assert(_recursions == 0, "invariant");
1004       _owner = THREAD;
1005       _recursions = 0;
1006     } else {
1007       // Apparent unbalanced locking ...
1008       // Naively we'd like to throw IllegalMonitorStateException.
1009       // As a practical matter we can neither allocate nor throw an
1010       // exception as ::exit() can be called from leaf routines.
1011       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1012       // Upon deeper reflection, however, in a properly run JVM the only
1013       // way we should encounter this situation is in the presence of
1014       // unbalanced JNI locking. TODO: CheckJNICalls.
1015       // See also: CR4414101
1016       assert(false, "Non-balanced monitor enter/exit! Likely JNI locking: "
1017              "owner=" INTPTR_FORMAT, p2i(_owner));






1018       return;
1019     }
1020   }
1021 
1022   if (_recursions != 0) {
1023     _recursions--;        // this is simple recursive enter
1024     return;
1025   }
1026 
 1027   // Invariant: after setting Responsible=null a thread must execute
1028   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1029   _Responsible = NULL;
1030 
1031 #if INCLUDE_JFR
1032   // get the owner's thread id for the MonitorEnter event
1033   // if it is enabled and the thread isn't suspended
1034   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1035     _previous_owner_tid = JFR_THREAD_ID(Self);
1036   }
1037 #endif
1038 
1039   for (;;) {
1040     assert(THREAD == _owner, "invariant");
1041 
1042     // release semantics: prior loads and stores from within the critical section
1043     // must not float (reorder) past the following store that drops the lock.
1044     // On SPARC that requires MEMBAR #loadstore|#storestore.
1045     // But of course in TSO #loadstore|#storestore is not required.



1046     OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
1047     OrderAccess::storeload();                        // See if we need to wake a successor

1048     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1049       return;
1050     }
1051     // Other threads are blocked trying to acquire the lock.
1052 
1053     // Normally the exiting thread is responsible for ensuring succession,
1054     // but if other successors are ready or other entering threads are spinning
1055     // then this thread can simply store NULL into _owner and exit without
1056     // waking a successor.  The existence of spinners or ready successors
1057     // guarantees proper succession (liveness).  Responsibility passes to the
1058     // ready or running successors.  The exiting thread delegates the duty.
1059     // More precisely, if a successor already exists this thread is absolved
1060     // of the responsibility of waking (unparking) one.
1061     //
1062     // The _succ variable is critical to reducing futile wakeup frequency.
1063     // _succ identifies the "heir presumptive" thread that has been made
1064     // ready (unparked) but that has not yet run.  We need only one such
1065     // successor thread to guarantee progress.
1066     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1067     // section 3.3 "Futile Wakeup Throttling" for details.


1069     // Note that spinners in Enter() also set _succ non-null.
1070     // In the current implementation spinners opportunistically set
1071     // _succ so that exiting threads might avoid waking a successor.
1072     // Another less appealing alternative would be for the exiting thread
1073     // to drop the lock and then spin briefly to see if a spinner managed
1074     // to acquire the lock.  If so, the exiting thread could exit
1075     // immediately without waking a successor, otherwise the exiting
1076     // thread would need to dequeue and wake a successor.
1077     // (Note that we'd need to make the post-drop spin short, but no
1078     // shorter than the worst-case round-trip cache-line migration time.
1079     // The dropped lock needs to become visible to the spinner, and then
1080     // the acquisition of the lock by the spinner must become visible to
1081     // the exiting thread).
1082 
1083     // It appears that an heir-presumptive (successor) must be made ready.
1084     // Only the current lock owner can manipulate the EntryList or
1085     // drain _cxq, so we need to reacquire the lock.  If we fail
1086     // to reacquire the lock the responsibility for ensuring succession
1087     // falls to the new owner.
1088     //
1089     if (!Atomic::replace_if_null(THREAD, &_owner)) {
1090       return;
1091     }
1092 
1093     guarantee(_owner == THREAD, "invariant");
1094 
1095     ObjectWaiter * w = NULL;
1096 
1097     w = _EntryList;
1098     if (w != NULL) {
1099       // I'd like to write: guarantee (w->_thread != Self).
1100       // But in practice an exiting thread may find itself on the EntryList.
1101       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 1102       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 1103       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 1104       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 1105       // releases the lock "O".  T1 resumes immediately after the ST of null into
 1106       // _owner, above.  T1 notices that the EntryList is populated, so it
1107       // reacquires the lock and then finds itself on the EntryList.
1108       // Given all that, we have to tolerate the circumstance where "w" is
1109       // associated with Self.


1202 
1203 
1204 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1205   assert(_owner == Self, "invariant");
1206 
1207   // Exit protocol:
1208   // 1. ST _succ = wakee
1209   // 2. membar #loadstore|#storestore;
 1210   // 3. ST _owner = NULL
 1211   // 4. unpark(wakee)
1212 
1213   _succ = Wakee->_thread;
1214   ParkEvent * Trigger = Wakee->_event;
1215 
1216   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1217   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1218   // out-of-scope (non-extant).
1219   Wakee  = NULL;
1220 
1221   // Drop the lock



1222   OrderAccess::release_store(&_owner, (void*)NULL);
1223   OrderAccess::fence();                               // ST _owner vs LD in unpark()

1224 
1225   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1226   Trigger->unpark();
1227 
1228   // Maintain stats and report events to JVMTI
1229   OM_PERFDATA_OP(Parks, inc());
1230 }
1231 
1232 
1233 // -----------------------------------------------------------------------------
1234 // Class Loader deadlock handling.
1235 //
1236 // complete_exit exits a lock returning recursion count
1237 // complete_exit/reenter operate as a wait without waiting
1238 // complete_exit requires an inflated monitor
1239 // The _owner field is not always the Thread addr even with an
1240 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1241 // thread due to contention.
1242 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1243   Thread * const Self = THREAD;
1244   assert(Self->is_Java_thread(), "Must be Java thread!");
1245   JavaThread *jt = (JavaThread *)THREAD;
1246 
1247   assert(InitDone, "Unexpectedly not initialized");
1248 
1249   if (THREAD != _owner) {
1250     if (THREAD->is_lock_owned ((address)_owner)) {

1251       assert(_recursions == 0, "internal state error");
1252       _owner = THREAD;   // Convert from basiclock addr to Thread addr
1253       _recursions = 0;
1254     }
1255   }
1256 
1257   guarantee(Self == _owner, "complete_exit not owner");
1258   intptr_t save = _recursions; // record the old recursion count
1259   _recursions = 0;        // set the recursion level to be 0
1260   exit(true, Self);           // exit the monitor
1261   guarantee(_owner != Self, "invariant");
1262   return save;
1263 }
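A hedged sketch of the intended pairing ("a wait without waiting") around work that must not hold the monitor, e.g. during class loading; the caller names are illustrative:

    intptr_t saved = monitor->complete_exit(THREAD);  // release fully, remember recursion count
    // ... perform the work that must not hold this monitor ...
    monitor->reenter(saved, THREAD);                  // reacquire and restore _recursions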
1264 
1265 // reenter() enters a lock and sets recursion count
1266 // complete_exit/reenter operate as a wait without waiting
1267 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1268   Thread * const Self = THREAD;
1269   assert(Self->is_Java_thread(), "Must be Java thread!");
1270   JavaThread *jt = (JavaThread *)THREAD;
1271 
1272   guarantee(_owner != Self, "reenter already owner");


1280 // immediate return if it doesn't. We don't use the CHECK macro
1281 // because we want the IMSE to be the only exception that is thrown
1282 // from the call site when false is returned. Any other pending
1283 // exception is ignored.
1284 #define CHECK_OWNER()                                                  \
1285   do {                                                                 \
1286     if (!check_owner(THREAD)) {                                        \
1287        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1288        return;                                                         \
1289      }                                                                 \
1290   } while (false)
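For reference, a monitor operation is expected to invoke the macro on entry so an IMSE aborts it before any state changes; a sketch in the shape of wait() (body elided):

    void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
      CHECK_OWNER();  // throws IMSE and returns if THREAD does not own the monitor
      // ... the rest of the wait protocol ...
    }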
1291 
1292 // Returns true if the specified thread owns the ObjectMonitor.
1293 // Otherwise returns false and throws IllegalMonitorStateException
1294 // (IMSE). If there is a pending exception and the specified thread
1295 // is not the owner, that exception will be replaced by the IMSE.
1296 bool ObjectMonitor::check_owner(Thread* THREAD) {
1297   if (_owner == THREAD) {
1298     return true;
1299   }
1300   if (THREAD->is_lock_owned((address)_owner)) {
1301     _owner = THREAD;  // convert from BasicLock addr to Thread addr

1302     _recursions = 0;
1303     return true;
1304   }
1305   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1306              "current thread is not owner", false);
1307 }
1308 
1309 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1310                                     ObjectMonitor* monitor,
1311                                     jlong notifier_tid,
1312                                     jlong timeout,
1313                                     bool timedout) {
1314   assert(event != NULL, "invariant");
1315   assert(monitor != NULL, "invariant");
1316   event->set_monitorClass(((oop)monitor->object())->klass());
1317   event->set_timeout(timeout);
1318   event->set_address((uintptr_t)monitor->object_addr());
1319   event->set_notifier(notifier_tid);
1320   event->set_timedOut(timedout);
1321   event->commit();


1786     // We periodically check to see if there's a safepoint pending.
1787     if ((ctr & 0xFF) == 0) {
1788       if (SafepointMechanism::should_block(Self)) {
1789         goto Abort;           // abrupt spin egress
1790       }
1791       SpinPause();
1792     }
1793 
1794     // Probe _owner with TATAS
1795     // If this thread observes the monitor transition or flicker
1796     // from locked to unlocked to locked, then the odds that this
1797     // thread will acquire the lock in this spin attempt go down
1798     // considerably.  The same argument applies if the CAS fails
1799     // or if we observe _owner change from one non-null value to
1800     // another non-null value.   In such cases we might abort
1801     // the spin without prejudice or apply a "penalty" to the
1802     // spin count-down variable "ctr", reducing it by 100, say.
1803 
1804     Thread * ox = (Thread *) _owner;
1805     if (ox == NULL) {
1806       ox = (Thread*)Atomic::cmpxchg(Self, &_owner, (void*)NULL);
1807       if (ox == NULL) {
1808         // The CAS succeeded -- this thread acquired ownership
1809         // Take care of some bookkeeping to exit spin state.
1810         if (_succ == Self) {
1811           _succ = NULL;
1812         }
1813 
1814         // Increase _SpinDuration :
1815         // The spin was successful (profitable) so we tend toward
1816         // longer spin attempts in the future.
1817         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1818         // If we acquired the lock early in the spin cycle it
1819         // makes sense to increase _SpinDuration proportionally.
1820         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1821         int x = _SpinDuration;
1822         if (x < Knob_SpinLimit) {
1823           if (x < Knob_Poverty) x = Knob_Poverty;
1824           _SpinDuration = x + Knob_Bonus;
1825         }
1826         return 1;

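The TATAS probe above tests _owner with a plain load and only attempts the CAS when the lock appears free, which avoids hammering the cache line with failing atomics. The pattern in isolation (illustration only; no backoff or bound shown):

    // Test-and-test-and-set: spin on a read, CAS only when the lock looks free.
    while (_owner != NULL) {
      SpinPause();
    }
    if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
      // acquired
    }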

2139 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2140   if (_om_ptr == NULL) {
2141     ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2142     om_ptr->inc_ref_count();
2143     _om_ptr = om_ptr;
2144   } else {
2145     ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2146     _om_ptr->dec_ref_count();
2147     _om_ptr = NULL;
2148   }
2149 }
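set_om_ptr() deliberately toggles: called with a non-NULL argument it takes a reference, called with NULL it drops one. A hedged usage sketch of the handle around async deflation (save_om_ptr() is the setter mentioned in the rev notes above; its exact signature is not shown in this excerpt):

    ObjectMonitorHandle omh;
    if (!omh.save_om_ptr(...)) {  // sets _om_ptr and increments ref_count; fails on a deflation race
      // raced with a deflating thread: retry the whole operation
    }
    // ... _om_ptr is safe to use here: ref_count > 0 holds off async deflation ...
    // the handle's destructor clears _om_ptr and decrements ref_count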
2150 
2151 void ObjectMonitor::print_on(outputStream* st) const {
2152   // The minimal things to print for markWord printing, more can be added for debugging and logging.
2153   st->print("{contentions=0x%08x,waiters=0x%08x"
2154             ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2155             contentions(), waiters(), recursions(),
2156             p2i(owner()));
2157 }
2158 void ObjectMonitor::print() const { print_on(tty); }

 228 void* ObjectMonitor::operator new[] (size_t size) throw() {
 229   return operator new (size);
 230 }
 231 void ObjectMonitor::operator delete(void* p) {
 232   FreeHeap(p);
 233 }
 234 void ObjectMonitor::operator delete[] (void *p) {
 235   operator delete(p);
 236 }
 237 
 238 // -----------------------------------------------------------------------------
 239 // Enter support
 240 
 241 void ObjectMonitor::enter(TRAPS) {
 242   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 243 
 244   // The following code is ordered to check the most common cases first
 245   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
 246   Thread * const Self = THREAD;
 247 
 248   void* cur = try_set_owner_from(Self, NULL);
 249   if (cur == NULL) {
 250     assert(_recursions == 0, "invariant");
 251     return;
 252   }
 253 
 254   if (cur == Self) {
 255     // TODO-FIXME: check for integer overflow!  BUGID 6557169.
 256     _recursions++;
 257     return;
 258   }
 259 
 260   if (Self->is_lock_owned ((address)cur)) {
 261     assert(_recursions == 0, "internal state error");
 262     _recursions = 1;
 263     set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.


 264     return;
 265   }
 266 
 267   if (AsyncDeflateIdleMonitors &&
 268       try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 269     // The deflation protocol finished the first part (setting owner),
 270     // but it failed the second part (making ref_count negative) and
 271     // bailed. Or the ObjectMonitor was async deflated and reused.
 272     // Acquired the monitor.
 273     assert(_recursions == 0, "invariant");
 274     return;
 275   }
 276 
 277   // We've encountered genuine contention.
 278   assert(Self->_Stalled == 0, "invariant");
 279   Self->_Stalled = intptr_t(this);
 280 
 281   // Try one round of spinning *before* enqueueing Self
 282   // and before going through the awkward and expensive state
 283   // transitions.  The following spin is strictly optional ...
 284   // Note that if we acquire the monitor from an initial spin
 285   // we forgo posting JVMTI events and firing DTRACE probes.
 286   if (TrySpin(Self) > 0) {
 287     assert(_owner == Self, "must be Self: owner=" INTPTR_FORMAT, p2i(_owner));
 288     assert(_recursions == 0, "must be 0: recursions=" INTX_FORMAT, _recursions);


 396 
 397     // The current thread already owns the monitor and is not going to
 398     // call park() for the remainder of the monitor enter protocol. So
 399     // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
 400     // event handler consumed an unpark() issued by the thread that
 401     // just exited the monitor.
 402   }
 403   if (event.should_commit()) {
 404     event.set_previousOwner((uintptr_t)_previous_owner_tid);
 405     event.commit();
 406   }
 407   OM_PERFDATA_OP(ContendedLockAttempts, inc());
 408 }
 409 
 410 // Caveat: TryLock() is not necessarily serializing if it returns failure.
 411 // Callers must compensate as needed.
 412 
 413 int ObjectMonitor::TryLock(Thread * Self) {
 414   void * own = _owner;
 415   if (own != NULL) return 0;
 416   if (try_set_owner_from(Self, NULL) == NULL) {
 417     assert(_recursions == 0, "invariant");
 418     return 1;
 419   }
 420   // The lock had been free momentarily, but we lost the race to the lock.
 421   // Interference -- the CAS failed.
 422   // We can either return -1 or retry.
 423   // Retry doesn't make as much sense because the lock was just acquired.
 424   return -1;
 425 }
 426 
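Per the v2.07 rev notes above, owner updates now go through set_owner_from(), set_owner_from_BasicLock() and try_set_owner_from(), which add monitorinflation+owner logging. The helper's definition is not part of this excerpt; a hedged sketch consistent with the call sites here (arguments are (new, old), returning the prior owner):

    // Sketch only -- the real helper also emits monitorinflation+owner logging.
    inline void* ObjectMonitor::try_set_owner_from(void* new_value, void* old_value) {
      void* prev = Atomic::cmpxchg(new_value, &_owner, old_value);
      if (prev == old_value) {
        log_trace(monitorinflation, owner)("mid=" INTPTR_FORMAT ": prev=" INTPTR_FORMAT
                                           ", new=" INTPTR_FORMAT, p2i(this), p2i(prev),
                                           p2i(new_value));
      }
      return prev;
    }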
 427 // Install the displaced mark word (dmw) of a deflating ObjectMonitor
 428 // into the header of the object associated with the monitor. This
 429 // idempotent method is called by a thread that is deflating a
 430 // monitor and by other threads that have detected a race with the
 431 // deflation process.
 432 void ObjectMonitor::install_displaced_markword_in_object(const oop obj) {
 433   // This function must only be called when (owner == DEFLATER_MARKER
 434   // && ref_count <= 0), but we can't guarantee that here because
 435   // those values could change when the ObjectMonitor gets moved from
 436   // the global free list to a per-thread free list.
 437 
 438   guarantee(obj != NULL, "must be non-NULL");
 439   if (object() != obj) {
 440     // ObjectMonitor's object ref no longer refers to the target object
 441     // so the object's header has already been restored.
 442     return;
 443   }
 444 
 445   markWord dmw = header();
 446   if (dmw.value() == 0) {
 447     // ObjectMonitor's header/dmw has been cleared so the ObjectMonitor
 448     // has been deflated and taken off the global free list.
 449     return;
 450   }
 451 
 452   // A non-NULL dmw has to be either neutral (not locked and not marked)
 453   // or already participating in this restoration protocol.
 454   assert(dmw.is_neutral() || (dmw.is_marked() && dmw.hash() == 0),
 455          "failed precondition: dmw=" INTPTR_FORMAT, dmw.value());
 456 
 457   markWord marked_dmw = markWord::zero();
 458   if (!dmw.is_marked() && dmw.hash() == 0) {
 459     // This dmw has not yet started the restoration protocol so we
 460     // mark a copy of the dmw to begin the protocol.
 461     // Note: A dmw with a hashcode does not take this code path.
 462     marked_dmw = dmw.set_marked();
 463 
 464     // All of the callers to this function can be racing with each
 465     // other trying to update the _header field.
 466     dmw = (markWord) Atomic::cmpxchg(marked_dmw, &_header, dmw);
 467     if (dmw.value() == 0) {
 468       // ObjectMonitor's header/dmw has been cleared so the object's


 478   if (dmw.is_marked()) {
 479     // Clear the mark from the header/dmw copy in preparation for
 480     // possible restoration from this thread.
 481     assert(dmw.hash() == 0, "hashcode must be 0: dmw=" INTPTR_FORMAT,
 482            dmw.value());
 483     dmw = dmw.set_unmarked();
 484   }
 485   assert(dmw.is_neutral(), "must be neutral: dmw=" INTPTR_FORMAT, dmw.value());
 486 
 487   // Install displaced mark word if the object's header still points
 488   // to this ObjectMonitor. All racing callers to this function will
 489   // reach this point, but only one can win.
 490   obj->cas_set_mark(dmw, markWord::encode(this));
 491 
 492   // Note: It does not matter which thread restored the header/dmw
 493   // into the object's header. The thread deflating the monitor just
 494   // wanted the object's header restored and it is. The threads that
 495   // detected a race with the deflation process also wanted the
 496   // object's header restored before they retry their operation and
 497   // because it is restored they will only retry once.






 498 }
 499 
 500 // Convert the fields used by is_busy() to a string that can be
 501 // used for diagnostic output.
 502 const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
 503   ss->print("is_busy: contentions=%d, waiters=%d, ", _contentions, _waiters);
 504   if (!AsyncDeflateIdleMonitors) {
 505     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 506   } else if (_owner != DEFLATER_MARKER) {
 507     ss->print("owner=" INTPTR_FORMAT, p2i(_owner));
 508   } else {
 509     ss->print("owner=" INTPTR_FORMAT, p2i(NULL));
 510   }
 511   ss->print(", cxq=" INTPTR_FORMAT ", EntryList=" INTPTR_FORMAT, p2i(_cxq),
 512             p2i(_EntryList));
 513   return ss->base();
 514 }
 515 
 516 #define MAX_RECHECK_INTERVAL 1000
 517 
 518 void ObjectMonitor::EnterI(TRAPS) {
 519   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 520 
 521   Thread * const Self = THREAD;
 522   assert(Self->is_Java_thread(), "invariant");
 523   assert(((JavaThread *) Self)->thread_state() == _thread_blocked, "invariant");
 524 
 525   // Try the lock - TATAS
 526   if (TryLock (Self) > 0) {
 527     assert(_succ != Self, "invariant");
 528     assert(_owner == Self, "invariant");
 529     assert(_Responsible != Self, "invariant");
 530     return;
 531   }
 532 
 533   if (AsyncDeflateIdleMonitors &&
 534       try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 535     // The deflation protocol finished the first part (setting owner),
 536     // but it failed the second part (making ref_count negative) and
 537     // bailed. Or the ObjectMonitor was async deflated and reused.
 538     // Acquired the monitor.
 539     assert(_succ != Self, "invariant");
 540     assert(_Responsible != Self, "invariant");
 541     return;
 542   }
 543 
 544   assert(InitDone, "Unexpectedly not initialized");
 545 
 546   // We try one round of spinning *before* enqueueing Self.
 547   //
 548   // If the _owner is ready but OFFPROC we could use a YieldTo()
 549   // operation to donate the remainder of this thread's quantum
 550   // to the owner.  This has subtle but beneficial affinity
 551   // effects.
 552 
 553   if (TrySpin(Self) > 0) {
 554     assert(_owner == Self, "invariant");


 641   for (;;) {
 642 
 643     if (TryLock(Self) > 0) break;
 644     assert(_owner != Self, "invariant");
 645 
 646     // park self
 647     if (_Responsible == Self) {
 648       Self->_ParkEvent->park((jlong) recheckInterval);
 649       // Increase the recheckInterval, but clamp the value.
 650       recheckInterval *= 8;
 651       if (recheckInterval > MAX_RECHECK_INTERVAL) {
 652         recheckInterval = MAX_RECHECK_INTERVAL;
 653       }
 654     } else {
 655       Self->_ParkEvent->park();
 656     }
 657 
 658     if (TryLock(Self) > 0) break;
 659 
 660     if (AsyncDeflateIdleMonitors &&
 661         try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 662       // The deflation protocol finished the first part (setting owner),
 663       // but it failed the second part (making ref_count negative) and
 664       // bailed. Or the ObjectMonitor was async deflated and reused.
 665       // Acquired the monitor.
 666       break;
 667     }
 668 
 669     // The lock is still contested.
 670     // Keep a tally of the # of futile wakeups.
 671     // Note that the counter is not protected by a lock or updated by atomics.
 672     // That is by design - we trade "lossy" counters which are exposed to
 673     // races during updates for a lower probe effect.
 674 
 675     // This PerfData object can be used in parallel with a safepoint.
 676     // See the work around in PerfDataManager::destroy().
 677     OM_PERFDATA_OP(FutileWakeups, inc());
 678     ++nWakeups;
 679 
 680     // Assuming this is not a spurious wakeup we'll normally find _succ == Self.
 681     // We can defer clearing _succ until after the spin completes


 773   ADIM_guarantee(ref_count() > 0, "must be positive: ref_count=%d", ref_count());
 774 
 775   assert(Self != NULL, "invariant");
 776   assert(SelfNode != NULL, "invariant");
 777   assert(SelfNode->_thread == Self, "invariant");
 778   assert(_waiters > 0, "invariant");
 779   assert(((oop)(object()))->mark() == markWord::encode(this), "invariant");
 780   assert(((JavaThread *)Self)->thread_state() != _thread_blocked, "invariant");
 781   JavaThread * jt = (JavaThread *) Self;
 782 
 783   int nWakeups = 0;
 784   for (;;) {
 785     ObjectWaiter::TStates v = SelfNode->TState;
 786     guarantee(v == ObjectWaiter::TS_ENTER || v == ObjectWaiter::TS_CXQ, "invariant");
 787     assert(_owner != Self, "invariant");
 788 
 789     if (TryLock(Self) > 0) break;
 790     if (TrySpin(Self) > 0) break;
 791 
 792     if (AsyncDeflateIdleMonitors &&
 793         try_set_owner_from(Self, DEFLATER_MARKER) == DEFLATER_MARKER) {
 794       // The deflation protocol finished the first part (setting owner),
 795       // but it failed the second part (making ref_count negative) and
 796       // bailed. Or the ObjectMonitor was async deflated and reused.
 797       // Acquired the monitor.
 798       break;
 799     }
 800 
 801     // State transition wrappers around park() ...
 802     // ReenterI() wisely defers state transitions until
 803     // it's clear we must park the thread.
 804     {
 805       OSThreadContendState osts(Self->osthread());
 806       ThreadBlockInVM tbivm(jt);
 807 
 808       // cleared by handle_special_suspend_equivalent_condition()
 809       // or java_suspend_self()
 810       jt->set_suspend_equivalent();
 811       Self->_ParkEvent->park();
 812 
 813       // were we externally suspended while we were waiting?


 970 // the timer expires.  If the lock is high traffic then the stranding latency
 971 // will be low due to (a).  If the lock is low traffic then the odds of
 972 // stranding are lower, although the worst-case stranding latency
 973 // is longer.  Critically, we don't want to put excessive load on the
 974 // platform's timer subsystem.  We want to minimize both the timer injection
 975 // rate (timers created/sec) as well as the number of timers active at
 976 // any one time.  (more precisely, we want to minimize timer-seconds, which is
 977 // the integral of the # of active timers at any instant over time).
 978 // Both impinge on OS scalability.  Given that, at most one thread parked on
 979 // a monitor will use a timer.
 980 //
 981 // There is also the risk of a futile wake-up. If we drop the lock
 982 // another thread can reacquire the lock immediately, and we can
 983 // then wake a thread unnecessarily. This is benign, and we've
 984 // structured the code so the windows are short and the frequency
 985 // of such futile wakeups is low.
 986 
 987 void ObjectMonitor::exit(bool not_suspended, TRAPS) {
 988   Thread * const Self = THREAD;
 989   if (THREAD != _owner) {
 990     void* cur = _owner;
 991     if (THREAD->is_lock_owned((address)cur)) {



 992       assert(_recursions == 0, "invariant");
 993       set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
 994       _recursions = 0;
 995     } else {
 996       // Apparent unbalanced locking ...
 997       // Naively we'd like to throw IllegalMonitorStateException.
 998       // As a practical matter we can neither allocate nor throw an
 999       // exception as ::exit() can be called from leaf routines.
1000       // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
1001       // Upon deeper reflection, however, in a properly run JVM the only
1002       // way we should encounter this situation is in the presence of
1003       // unbalanced JNI locking. TODO: CheckJNICalls.
1004       // See also: CR4414101
1005       tty->print_cr("ERROR: ObjectMonitor::exit(): thread=" INTPTR_FORMAT
1006                     " is exiting an ObjectMonitor it does not own.",
1007                     p2i(THREAD));
1008       tty->print_cr("The imbalance is possibly caused by JNI locking.");
1009       print_debug_style_on(tty);
1010       // Changing this from an assert() to ADIM_guarantee() may run
1011       // afoul of any test that is inducing non-balanced JNI locking.
1012       ADIM_guarantee(false, "Non-balanced monitor enter/exit!");
1013       return;
1014     }
1015   }
1016 
1017   if (_recursions != 0) {
1018     _recursions--;        // this is simple recursive enter
1019     return;
1020   }
1021 
 1022   // Invariant: after setting Responsible=null a thread must execute
1023   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
1024   _Responsible = NULL;
1025 
1026 #if INCLUDE_JFR
1027   // get the owner's thread id for the MonitorEnter event
1028   // if it is enabled and the thread isn't suspended
1029   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
1030     _previous_owner_tid = JFR_THREAD_ID(Self);
1031   }
1032 #endif
1033 
1034   for (;;) {
1035     assert(THREAD == _owner, "invariant");
1036 
1037     // release semantics: prior loads and stores from within the critical section
1038     // must not float (reorder) past the following store that drops the lock.
1039     // On SPARC that requires MEMBAR #loadstore|#storestore.
1040     // But of course in TSO #loadstore|#storestore is not required.
1041     if (AsyncDeflateIdleMonitors) {
1042       set_owner_from(NULL, Self);
1043     } else {
1044       OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
1045       OrderAccess::storeload();                        // See if we need to wake a successor
1046     }
1047     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
1048       return;
1049     }
1050     // Other threads are blocked trying to acquire the lock.
1051 
1052     // Normally the exiting thread is responsible for ensuring succession,
1053     // but if other successors are ready or other entering threads are spinning
1054     // then this thread can simply store NULL into _owner and exit without
1055     // waking a successor.  The existence of spinners or ready successors
1056     // guarantees proper succession (liveness).  Responsibility passes to the
1057     // ready or running successors.  The exiting thread delegates the duty.
1058     // More precisely, if a successor already exists this thread is absolved
1059     // of the responsibility of waking (unparking) one.
1060     //
1061     // The _succ variable is critical to reducing futile wakeup frequency.
1062     // _succ identifies the "heir presumptive" thread that has been made
1063     // ready (unparked) but that has not yet run.  We need only one such
1064     // successor thread to guarantee progress.
1065     // See http://www.usenix.org/events/jvm01/full_papers/dice/dice.pdf
1066     // section 3.3 "Futile Wakeup Throttling" for details.


1068     // Note that spinners in Enter() also set _succ non-null.
1069     // In the current implementation spinners opportunistically set
1070     // _succ so that exiting threads might avoid waking a successor.
1071     // Another less appealing alternative would be for the exiting thread
1072     // to drop the lock and then spin briefly to see if a spinner managed
1073     // to acquire the lock.  If so, the exiting thread could exit
1074     // immediately without waking a successor, otherwise the exiting
1075     // thread would need to dequeue and wake a successor.
1076     // (Note that we'd need to make the post-drop spin short, but no
1077     // shorter than the worst-case round-trip cache-line migration time.
1078     // The dropped lock needs to become visible to the spinner, and then
1079     // the acquisition of the lock by the spinner must become visible to
1080     // the exiting thread).
1081 
1082     // It appears that an heir-presumptive (successor) must be made ready.
1083     // Only the current lock owner can manipulate the EntryList or
1084     // drain _cxq, so we need to reacquire the lock.  If we fail
1085     // to reacquire the lock the responsibility for ensuring succession
1086     // falls to the new owner.
1087     //
1088     if (try_set_owner_from(Self, NULL) != NULL) {
1089       return;
1090     }
1091 
1092     guarantee(_owner == THREAD, "invariant");
1093 
1094     ObjectWaiter * w = NULL;
1095 
1096     w = _EntryList;
1097     if (w != NULL) {
1098       // I'd like to write: guarantee (w->_thread != Self).
1099       // But in practice an exiting thread may find itself on the EntryList.
1100       // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
 1101       // then calls exit().  Exit releases the lock by setting O._owner to NULL.
 1102       // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
 1103       // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
 1104       // releases the lock "O".  T1 resumes immediately after the ST of null into
 1105       // _owner, above.  T1 notices that the EntryList is populated, so it
1106       // reacquires the lock and then finds itself on the EntryList.
1107       // Given all that, we have to tolerate the circumstance where "w" is
1108       // associated with Self.


1201 
1202 
1203 void ObjectMonitor::ExitEpilog(Thread * Self, ObjectWaiter * Wakee) {
1204   assert(_owner == Self, "invariant");
1205 
1206   // Exit protocol:
1207   // 1. ST _succ = wakee
1208   // 2. membar #loadstore|#storestore;
 1209   // 3. ST _owner = NULL
 1210   // 4. unpark(wakee)
1211 
1212   _succ = Wakee->_thread;
1213   ParkEvent * Trigger = Wakee->_event;
1214 
1215   // Hygiene -- once we've set _owner = NULL we can't safely dereference Wakee again.
1216   // The thread associated with Wakee may have grabbed the lock and "Wakee" may be
1217   // out-of-scope (non-extant).
1218   Wakee  = NULL;
1219 
1220   // Drop the lock
1221   if (AsyncDeflateIdleMonitors) {
1222     set_owner_from(NULL, Self);
1223   } else {
1224     OrderAccess::release_store(&_owner, (void*)NULL);
1225     OrderAccess::fence();                               // ST _owner vs LD in unpark()
1226   }
1227 
1228   DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
1229   Trigger->unpark();
1230 
1231   // Maintain stats and report events to JVMTI
1232   OM_PERFDATA_OP(Parks, inc());
1233 }
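With AsyncDeflateIdleMonitors the lock is dropped through set_owner_from(NULL, Self) rather than a bare release_store(), so the owner transition is logged and the expected previous owner can be checked. A hedged sketch of that setter, mirroring the ordering of the else branch above (the real definition lives elsewhere in the patch; whether it also provides the StoreLoad barrier of the else branch is not shown in this excerpt):

    // Sketch only; (new, old) argument order matches the call sites in this file.
    inline void ObjectMonitor::set_owner_from(void* new_value, void* old_value) {
      ADIM_guarantee(_owner == old_value, "unexpected owner");  // hypothetical check
      OrderAccess::release_store(&_owner, new_value);
      // + monitorinflation+owner logging in the real helper
    }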
1234 
1235 
1236 // -----------------------------------------------------------------------------
1237 // Class Loader deadlock handling.
1238 //
1239 // complete_exit exits a lock returning recursion count
1240 // complete_exit/reenter operate as a wait without waiting
1241 // complete_exit requires an inflated monitor
1242 // The _owner field is not always the Thread addr even with an
1243 // inflated monitor, e.g. the monitor can be inflated by a non-owning
1244 // thread due to contention.
1245 intptr_t ObjectMonitor::complete_exit(TRAPS) {
1246   Thread * const Self = THREAD;
1247   assert(Self->is_Java_thread(), "Must be Java thread!");
1248   JavaThread *jt = (JavaThread *)THREAD;
1249 
1250   assert(InitDone, "Unexpectedly not initialized");
1251 
1252   if (THREAD != _owner) {
1253     void* cur = _owner;
1254     if (THREAD->is_lock_owned((address)cur)) {
1255       assert(_recursions == 0, "internal state error");
1256       set_owner_from_BasicLock(Self, cur);  // Convert from BasicLock* to Thread*.
1257       _recursions = 0;
1258     }
1259   }
1260 
1261   guarantee(Self == _owner, "complete_exit not owner");
1262   intptr_t save = _recursions; // record the old recursion count
1263   _recursions = 0;        // set the recursion level to be 0
1264   exit(true, Self);           // exit the monitor
1265   guarantee(_owner != Self, "invariant");
1266   return save;
1267 }
1268 
1269 // reenter() enters a lock and sets recursion count
1270 // complete_exit/reenter operate as a wait without waiting
1271 void ObjectMonitor::reenter(intptr_t recursions, TRAPS) {
1272   Thread * const Self = THREAD;
1273   assert(Self->is_Java_thread(), "Must be Java thread!");
1274   JavaThread *jt = (JavaThread *)THREAD;
1275 
1276   guarantee(_owner != Self, "reenter already owner");


1284 // immediate return if it doesn't. We don't use the CHECK macro
1285 // because we want the IMSE to be the only exception that is thrown
1286 // from the call site when false is returned. Any other pending
1287 // exception is ignored.
1288 #define CHECK_OWNER()                                                  \
1289   do {                                                                 \
1290     if (!check_owner(THREAD)) {                                        \
1291        assert(HAS_PENDING_EXCEPTION, "expected a pending IMSE here."); \
1292        return;                                                         \
1293      }                                                                 \
1294   } while (false)
1295 
1296 // Returns true if the specified thread owns the ObjectMonitor.
1297 // Otherwise returns false and throws IllegalMonitorStateException
1298 // (IMSE). If there is a pending exception and the specified thread
1299 // is not the owner, that exception will be replaced by the IMSE.
1300 bool ObjectMonitor::check_owner(Thread* THREAD) {
1301   if (_owner == THREAD) {
1302     return true;
1303   }
1304   void* cur = _owner;
1305   if (THREAD->is_lock_owned((address)cur)) {
1306     set_owner_from_BasicLock(THREAD, cur);  // Convert from BasicLock* to Thread*.
1307     _recursions = 0;
1308     return true;
1309   }
1310   THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
1311              "current thread is not owner", false);
1312 }
1313 
1314 static void post_monitor_wait_event(EventJavaMonitorWait* event,
1315                                     ObjectMonitor* monitor,
1316                                     jlong notifier_tid,
1317                                     jlong timeout,
1318                                     bool timedout) {
1319   assert(event != NULL, "invariant");
1320   assert(monitor != NULL, "invariant");
1321   event->set_monitorClass(((oop)monitor->object())->klass());
1322   event->set_timeout(timeout);
1323   event->set_address((uintptr_t)monitor->object_addr());
1324   event->set_notifier(notifier_tid);
1325   event->set_timedOut(timedout);
1326   event->commit();


1791     // We periodically check to see if there's a safepoint pending.
1792     if ((ctr & 0xFF) == 0) {
1793       if (SafepointMechanism::should_block(Self)) {
1794         goto Abort;           // abrupt spin egress
1795       }
1796       SpinPause();
1797     }
1798 
1799     // Probe _owner with TATAS
1800     // If this thread observes the monitor transition or flicker
1801     // from locked to unlocked to locked, then the odds that this
1802     // thread will acquire the lock in this spin attempt go down
1803     // considerably.  The same argument applies if the CAS fails
1804     // or if we observe _owner change from one non-null value to
1805     // another non-null value.   In such cases we might abort
1806     // the spin without prejudice or apply a "penalty" to the
1807     // spin count-down variable "ctr", reducing it by 100, say.
1808 
1809     Thread * ox = (Thread *) _owner;
1810     if (ox == NULL) {
1811       ox = (Thread*)try_set_owner_from(Self, NULL);
1812       if (ox == NULL) {
1813         // The CAS succeeded -- this thread acquired ownership
1814         // Take care of some bookkeeping to exit spin state.
1815         if (_succ == Self) {
1816           _succ = NULL;
1817         }
1818 
1819         // Increase _SpinDuration :
1820         // The spin was successful (profitable) so we tend toward
1821         // longer spin attempts in the future.
1822         // CONSIDER: factor "ctr" into the _SpinDuration adjustment.
1823         // If we acquired the lock early in the spin cycle it
1824         // makes sense to increase _SpinDuration proportionally.
1825         // Note that we don't clamp SpinDuration precisely at SpinLimit.
1826         int x = _SpinDuration;
1827         if (x < Knob_SpinLimit) {
1828           if (x < Knob_Poverty) x = Knob_Poverty;
1829           _SpinDuration = x + Knob_Bonus;
1830         }
1831         return 1;


2144 void ObjectMonitorHandle::set_om_ptr(ObjectMonitor * om_ptr) {
2145   if (_om_ptr == NULL) {
2146     ADIM_guarantee(om_ptr != NULL, "cannot clear an unset om_ptr");
2147     om_ptr->inc_ref_count();
2148     _om_ptr = om_ptr;
2149   } else {
2150     ADIM_guarantee(om_ptr == NULL, "can only clear a set om_ptr");
2151     _om_ptr->dec_ref_count();
2152     _om_ptr = NULL;
2153   }
2154 }
2155 
2156 void ObjectMonitor::print_on(outputStream* st) const {
2157   // The minimal things to print for markWord printing, more can be added for debugging and logging.
2158   st->print("{contentions=0x%08x,waiters=0x%08x"
2159             ",recursions=" INTX_FORMAT ",owner=" INTPTR_FORMAT "}",
2160             contentions(), waiters(), recursions(),
2161             p2i(owner()));
2162 }
2163 void ObjectMonitor::print() const { print_on(tty); }
2164 
2165 // Print the ObjectMonitor like a debugger would:
2166 //
2167 // (ObjectMonitor) 0x00007fdfb6012e40 = {
2168 //   _header = (_value = 1)
2169 //   _object = 0x000000070ff45fd0
2170 //   _allocation_state = Old
2171 //   _pad_buf0 = {
2172 //     [0] = '\0'
2173 //     ...
2174 //     [43] = '\0'
2175 //   }
2176 //   _owner = 0x0000000000000000
2177 //   _previous_owner_tid = 0
2178 //   _pad_buf1 = {
2179 //     [0] = '\0'
2180 //     ...
2181 //     [47] = '\0'
2182 //   }
2183 //   _ref_count = 1
2184 //   _pad_buf2 = {
2185 //     [0] = '\0'
2186 //     ...
2187 //     [59] = '\0'
2188 //   }
2189 //   _next_om = 0x0000000000000000
2190 //   _recursions = 0
2191 //   _EntryList = 0x0000000000000000
2192 //   _cxq = 0x0000000000000000
2193 //   _succ = 0x0000000000000000
2194 //   _Responsible = 0x0000000000000000
2195 //   _Spinner = 0
2196 //   _SpinDuration = 5000
2197 //   _contentions = 0
2198 //   _WaitSet = 0x0000700009756248
2199 //   _waiters = 1
2200 //   _WaitSetLock = 0
2201 // }
2202 //
2203 void ObjectMonitor::print_debug_style_on(outputStream* st) const {
2204   st->print_cr("(ObjectMonitor *) " INTPTR_FORMAT " = {", p2i(this));
2205   st->print_cr("  _header = " INTPTR_FORMAT, header().value());
2206   st->print_cr("  _object = " INTPTR_FORMAT, p2i(_object));
2207   st->print("  _allocation_state = ");
2208   if (is_free()) {
2209     st->print("Free");
2210   } else if (is_old()) {
2211     st->print("Old");
2212   } else if (is_new()) {
2213     st->print("New");
2214   } else {
2215     st->print("unknown=%d", _allocation_state);
2216   }
2217   st->cr();
2218   st->print_cr("  _pad_buf0 = {");
2219   st->print_cr("    [0] = '\\0'");
2220   st->print_cr("    ...");
2221   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf0) - 1);
2222   st->print_cr("  }");
2223   st->print_cr("  _owner = " INTPTR_FORMAT, p2i(_owner));
2224   st->print_cr("  _previous_owner_tid = " JLONG_FORMAT, _previous_owner_tid);
2225   st->print_cr("  _pad_buf1 = {");
2226   st->print_cr("    [0] = '\\0'");
2227   st->print_cr("    ...");
2228   st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf1) - 1);
2229   st->print_cr("  }");
2230   st->print_cr("  _ref_count = %d", ref_count());
2231   st->print_cr("  _pad_buf2 = {");
2232   st->print_cr("    [0] = '\\0'");
2233   st->print_cr("    ...");
 2234     st->print_cr("    [%d] = '\\0'", (int)sizeof(_pad_buf2) - 1);
2235   st->print_cr("  }");
2236   st->print_cr("  _next_om = " INTPTR_FORMAT, p2i(_next_om));
2237   st->print_cr("  _recursions = " INTX_FORMAT, _recursions);
2238   st->print_cr("  _EntryList = " INTPTR_FORMAT, p2i(_EntryList));
2239   st->print_cr("  _cxq = " INTPTR_FORMAT, p2i(_cxq));
2240   st->print_cr("  _succ = " INTPTR_FORMAT, p2i(_succ));
2241   st->print_cr("  _Responsible = " INTPTR_FORMAT, p2i(_Responsible));
2242   st->print_cr("  _Spinner = %d", _Spinner);
2243   st->print_cr("  _SpinDuration = %d", _SpinDuration);
2244   st->print_cr("  _contentions = %d", _contentions);
2245   st->print_cr("  _WaitSet = " INTPTR_FORMAT, p2i(_WaitSet));
2246   st->print_cr("  _waiters = %d", _waiters);
2247   st->print_cr("  _WaitSetLock = %d", _WaitSetLock);
2248   st->print_cr("}");
2249 }