
src/hotspot/share/runtime/objectMonitor.cpp

rev 51675 : imported patch 8210514

*** 544,554 ****
    // TODO: Defer all thread state transitions until park-time.
    // Since state transitions are heavy and inefficient we'd like
    // to defer the state transitions until absolutely necessary,
    // and in doing so avoid some transitions ...

-   TEVENT(Inflated enter - Contention);

    int nWakeups = 0;
    int recheckInterval = 1;

    for (;;) {
--- 544,553 ----
*** 559,588 ****
        Atomic::replace_if_null(Self, &_Responsible);
      }

      // park self
      if (_Responsible == Self || (SyncFlags & 1)) {
-       TEVENT(Inflated enter - park TIMED);
        Self->_ParkEvent->park((jlong) recheckInterval);
        // Increase the recheckInterval, but clamp the value.
        recheckInterval *= 8;
        if (recheckInterval > MAX_RECHECK_INTERVAL) {
          recheckInterval = MAX_RECHECK_INTERVAL;
        }
      } else {
-       TEVENT(Inflated enter - park UNTIMED);
        Self->_ParkEvent->park();
      }

      if (TryLock(Self) > 0) break;

      // The lock is still contested.
      // Keep a tally of the # of futile wakeups.
      // Note that the counter is not protected by a lock or updated by atomics.
      // That is by design - we trade "lossy" counters which are exposed to
      // races during updates for a lower probe effect.
!     TEVENT(Inflated enter - Futile wakeup);

      // This PerfData object can be used in parallel with a safepoint.
      // See the work around in PerfDataManager::destroy().
      OM_PERFDATA_OP(FutileWakeups, inc());
      ++nWakeups;
--- 558,585 ----
      Atomic::replace_if_null(Self, &_Responsible);
    }

    // park self
    if (_Responsible == Self || (SyncFlags & 1)) {
      Self->_ParkEvent->park((jlong) recheckInterval);
      // Increase the recheckInterval, but clamp the value.
      recheckInterval *= 8;
      if (recheckInterval > MAX_RECHECK_INTERVAL) {
        recheckInterval = MAX_RECHECK_INTERVAL;
      }
    } else {
      Self->_ParkEvent->park();
    }

    if (TryLock(Self) > 0) break;

    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
!
    // This PerfData object can be used in parallel with a safepoint.
    // See the work around in PerfDataManager::destroy().
    OM_PERFDATA_OP(FutileWakeups, inc());
    ++nWakeups;
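Reviewer note on the timed-park path in this hunk: the thread nominated as _Responsible parks with a timeout that starts at 1 ms and grows by a factor of 8 per futile wakeup, clamped at MAX_RECHECK_INTERVAL, so succession cannot stall indefinitely if a wakeup is lost. A minimal standalone sketch of that backoff arithmetic; the clamp value of 1000 and the driver loop are illustrative assumptions, not HotSpot's code:

    #include <cstdio>

    static const int MAX_RECHECK_INTERVAL = 1000;   // assumed clamp, in ms

    int main() {
      int recheckInterval = 1;                      // first timed park is short
      for (int wakeup = 0; wakeup < 6; wakeup++) {
        printf("park for %d ms\n", recheckInterval);
        recheckInterval *= 8;                       // geometric backoff
        if (recheckInterval > MAX_RECHECK_INTERVAL) {
          recheckInterval = MAX_RECHECK_INTERVAL;   // yields 1, 8, 64, 512, 1000, 1000
        }
      }
      return 0;
    }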
*** 705,716 ****
    assert(_owner != Self, "invariant");

    if (TryLock(Self) > 0) break;
    if (TrySpin(Self) > 0) break;

-   TEVENT(Wait Reentry - parking);
-
    // State transition wrappers around park() ...
    // ReenterI() wisely defers state transitions until
    // it's clear we must park the thread.
    {
      OSThreadContendState osts(Self->osthread());
--- 702,711 ----
*** 742,752 ****
    // The lock is still contested.
    // Keep a tally of the # of futile wakeups.
    // Note that the counter is not protected by a lock or updated by atomics.
    // That is by design - we trade "lossy" counters which are exposed to
    // races during updates for a lower probe effect.
-   TEVENT(Wait Reentry - futile wakeup);
    ++nWakeups;

    // Assuming this is not a spurious wakeup we'll normally
    // find that _succ == Self.
    if (_succ == Self) _succ = NULL;
--- 737,746 ----
*** 793,803 ****
    if (nxt != NULL) nxt->_prev = prv;
    if (prv != NULL) prv->_next = nxt;
    if (SelfNode == _EntryList) _EntryList = nxt;
    assert(nxt == NULL || nxt->TState == ObjectWaiter::TS_ENTER, "invariant");
    assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
-   TEVENT(Unlink from EntryList);
  } else {
    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
    // Inopportune interleaving -- Self is still on the cxq.
    // This usually means the enqueue of self raced an exiting thread.
    // Normally we'll find Self near the front of the cxq, so
--- 787,796 ----
*** 832,842 ****
      assert(p != _cxq, "invariant");
      assert(q != NULL, "invariant");
      assert(q->_next == p, "invariant");
      q->_next = p->_next;
    }
-   TEVENT(Unlink from cxq);
  }

#ifdef ASSERT
  // Diagnostic hygiene ...
  SelfNode->_prev = (ObjectWaiter *) 0xBAD;
--- 825,834 ----
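These two hunks sit in the unlink-after-acquire path: the EntryList case splices SelfNode out of a doubly linked list directly, while the cxq case must walk the singly linked list with a trailing pointer (q one node behind p) until p reaches SelfNode, so q->_next can be routed around it. A self-contained sketch of that trailing-pointer splice, with illustrative types rather than HotSpot's ObjectWaiter:

    struct Node { Node* next; };

    // Remove target from a singly linked list headed by *head.
    // q trails one node behind p so that when p == target we can splice.
    static void unlink(Node** head, Node* target) {
      if (*head == nullptr) return;       // empty list: nothing to do
      if (*head == target) {              // fast path: target is at the head
        *head = target->next;
        return;
      }
      Node* q = *head;
      for (Node* p = q->next; p != nullptr; q = p, p = p->next) {
        if (p == target) {
          q->next = p->next;              // route around the unlinked node
          return;
        }
      }
    }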
*** 921,939 ****
      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
      // Upon deeper reflection, however, in a properly run JVM the only
      // way we should encounter this situation is in the presence of
      // unbalanced JNI locking. TODO: CheckJNICalls.
      // See also: CR4414101
-     TEVENT(Exit - Throw IMSX);
      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
      return;
    }
  }

  if (_recursions != 0) {
    _recursions--;        // this is simple recursive enter
-   TEVENT(Inflated exit - recursive);
    return;
  }

  // Invariant: after setting Responsible=null an thread must execute
  // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
--- 913,929 ----
*** 966,979 ****
    // Instead, I use release_store(), which is implemented as just a simple
    // ST on x64, x86 and SPARC.
    OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
    OrderAccess::storeload();                           // See if we need to wake a successor
    if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
-     TEVENT(Inflated exit - simple egress);
      return;
    }
-   TEVENT(Inflated exit - complex egress);

    // Other threads are blocked trying to acquire the lock.
    // Normally the exiting thread is responsible for ensuring succession,
    // but if other successors are ready or other entering threads are spinning
    // then this thread can simply store NULL into _owner and exit without
--- 956,967 ----
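The exit fast path in this hunk is a Dekker-style store/StoreLoad/load handshake: release _owner, fence, then re-read _EntryList|_cxq to decide whether a successor must be woken. A hedged standalone sketch of the idiom using std::atomic instead of OrderAccess; the ToyMonitor type and names are illustrative only:

    #include <atomic>

    struct ToyMonitor {
      std::atomic<void*> owner{nullptr};
      std::atomic<void*> entry_list{nullptr};   // stand-in for _EntryList|_cxq

      // Returns true if the exiting thread may leave without waking anyone.
      bool fast_exit() {
        owner.store(nullptr, std::memory_order_release);      // drop the lock
        std::atomic_thread_fence(std::memory_order_seq_cst);  // ST owner vs LD entry_list
        // Without the StoreLoad fence the load below could be satisfied
        // before the store above becomes visible; the exiting thread and a
        // concurrent enqueuer could then each decide the other side will
        // handle the wakeup, stranding the waiter.
        return entry_list.load(std::memory_order_seq_cst) == nullptr;
      }
    };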
*** 1011,1028 ****
      // falls to the new owner.
      //
      if (!Atomic::replace_if_null(THREAD, &_owner)) {
        return;
      }
-     TEVENT(Exit - Reacquired);
    } else {
      if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
        OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
        OrderAccess::storeload();
        // Ratify the previously observed values.
        if (_cxq == NULL || _succ != NULL) {
-         TEVENT(Inflated exit - simple egress);
          return;
        }

        // inopportune interleaving -- the exiting thread (this thread)
        // in the fast-exit path raced an entering thread in the slow-enter
--- 999,1014 ----
*** 1034,1049 ****
        // fall-through into the code below which wakes a successor.
        // B.  If the elements forming the EntryList|cxq are TSM
        //     we could simply unpark() the lead thread and return
        //     without having set _succ.
        if (!Atomic::replace_if_null(THREAD, &_owner)) {
-         TEVENT(Inflated exit - reacquired succeeded);
          return;
        }
-       TEVENT(Inflated exit - reacquired failed);
-     } else {
-       TEVENT(Inflated exit - complex egress);
      }
    }

    guarantee(_owner == THREAD, "invariant");
--- 1020,1031 ----
*** 1166,1176 ****
      assert(w != NULL, "Invariant");
      ObjectWaiter * u = Atomic::cmpxchg((ObjectWaiter*)NULL, &_cxq, w);
      if (u == w) break;
      w = u;
    }
-   TEVENT(Inflated exit - drain cxq into EntryList);

    assert(w != NULL, "invariant");
    assert(_EntryList == NULL, "invariant");

    // Convert the LIFO SLL anchored by _cxq into a DLL.
--- 1148,1157 ----
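The loop in this hunk is the standard detach-everything idiom for a Treiber-style LIFO: keep CAS'ing NULL over _cxq until the CAS wins, at which point the displaced pointer is the entire chain, now privately owned by the draining thread. A sketch in portable C++; Waiter and detach_all are illustrative names, not HotSpot's:

    #include <atomic>

    struct Waiter { Waiter* next = nullptr; };

    // Detach the whole LIFO chain: CAS NULL over the head until we win
    // against concurrent pushers; the displaced head is the full list.
    static Waiter* detach_all(std::atomic<Waiter*>& cxq) {
      Waiter* w = cxq.load();
      while (w != nullptr) {
        Waiter* u = w;
        if (cxq.compare_exchange_strong(u, nullptr)) {
          return w;                     // we won: w anchors the private chain
        }
        w = u;                          // lost a race: retry with the new head
      }
      return nullptr;                   // list was already empty
    }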
*** 1270,1280 ****
    assert(jSelf->is_suspend_equivalent(), "invariant");
    jSelf->clear_suspend_equivalent();
    if (2 == Mode) OrderAccess::storeload();
    if (!jSelf->is_external_suspend()) return false;
    // We raced a suspension -- fall thru into the slow path
-   TEVENT(ExitSuspendEquivalent - raced);
    jSelf->set_suspend_equivalent();
  }

  return jSelf->handle_special_suspend_equivalent_condition();
}
--- 1251,1260 ----
*** 1298,1311 ****
  // Drop the lock
  OrderAccess::release_store(&_owner, (void*)NULL);
  OrderAccess::fence();                               // ST _owner vs LD in unpark()

- if (SafepointMechanism::poll(Self)) {
-   TEVENT(unpark before SAFEPOINT);
- }
-
  DTRACE_MONITOR_PROBE(contended__exit, this, object(), Self);
  Trigger->unpark();

  // Maintain stats and report events to JVMTI
  OM_PERFDATA_OP(Parks, inc());
--- 1278,1287 ----
*** 1370,1390 ****
    if (THREAD != _owner) {                                                  \
      if (THREAD->is_lock_owned((address) _owner)) {                         \
        _owner = THREAD;  /* Convert from basiclock addr to Thread addr */   \
        _recursions = 0;                                                     \
      } else {                                                               \
-       TEVENT(Throw IMSX);                                                  \
        THROW(vmSymbols::java_lang_IllegalMonitorStateException());          \
      }                                                                      \
    }                                                                        \
  } while (false)

// check_slow() is a misnomer.  It's called to simply to throw an IMSX exception.
// TODO-FIXME: remove check_slow() -- it's likely dead.

void ObjectMonitor::check_slow(TRAPS) {
- TEVENT(check_slow - throw IMSX);
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}

static int Adjust(volatile int * adr, int dx) {
--- 1346,1364 ----
*** 1442,1458 ****
        // this ObjectMonitor.
      }

      if (event.should_commit()) {
        post_monitor_wait_event(&event, this, 0, millis, false);
      }
-     TEVENT(Wait - Throw IEX);
      THROW(vmSymbols::java_lang_InterruptedException());
      return;
    }
- TEVENT(Wait);
-
  assert(Self->_Stalled == 0, "invariant");
  Self->_Stalled = intptr_t(this);

  jt->set_current_waiting_monitor(this);

  // create a node to be put into the queue
--- 1416,1429 ----
*** 1629,1639 ****
  // check if the notification happened
  if (!WasNotified) {
    // no, it could be timeout or Thread.interrupt() or both
    // check for interrupt event, otherwise it is timeout
    if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
-     TEVENT(Wait - throw IEX from epilog);
      THROW(vmSymbols::java_lang_InterruptedException());
    }
  }

  // NOTE: Spurious wake up will be consider as timeout.
--- 1600,1609 ----
*** 1650,1660 ****
  const int policy = Knob_MoveNotifyee;

  Thread::SpinAcquire(&_WaitSetLock, "WaitSet - notify");
  ObjectWaiter * iterator = DequeueWaiter();
  if (iterator != NULL) {
-   TEVENT(Notify1 - Transfer);
    guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
    guarantee(iterator->_notified == 0, "invariant");
    // Disposition - what might we do with iterator ?
    // a.  add it directly to the EntryList - either tail (policy == 1)
    //     or head (policy == 0).
--- 1620,1629 ----
*** 1764,1774 ****
// variable and MAX_RECHECK_INTERVAL.

void ObjectMonitor::notify(TRAPS) {
  CHECK_OWNER();
  if (_WaitSet == NULL) {
-   TEVENT(Empty-Notify);
    return;
  }
  DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
  INotify(THREAD);
  OM_PERFDATA_OP(Notifications, inc(1));
--- 1733,1742 ----
*** 1783,1793 ****
// mode the waitset will be empty and the EntryList will be "DCBAXYZ".

void ObjectMonitor::notifyAll(TRAPS) {
  CHECK_OWNER();
  if (_WaitSet == NULL) {
-   TEVENT(Empty-NotifyAll);
    return;
  }

  DTRACE_MONITOR_PROBE(notifyAll, this, object(), THREAD);
  int tally = 0;
--- 1751,1760 ----
*** 1910,1927 ****
  if (ctr < Knob_SpinBase) ctr = Knob_SpinBase;
  if (ctr <= 0) return 0;

  if (Knob_SuccRestrict && _succ != NULL) return 0;
  if (Knob_OState && NotRunnable (Self, (Thread *) _owner)) {
-   TEVENT(Spin abort - notrunnable [TOP]);
    return 0;
  }

  int MaxSpin = Knob_MaxSpinners;
  if (MaxSpin >= 0) {
    if (_Spinner > MaxSpin) {
-     TEVENT(Spin abort -- too many spinners);
      return 0;
    }
    // Slightly racy, but benign ...
    Adjust(&_Spinner, 1);
  }
--- 1877,1892 ----
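The Knob_MaxSpinners check above is admission control for the spin phase: the read of _Spinner and the Adjust() increment are deliberately not one atomic step, so the bound can be overshot by a thread or two; "slightly racy, but benign", as the in-tree comment says, since the cap only needs to be approximate. A sketch of the same pattern with std::atomic; the names and cap value are assumptions:

    #include <atomic>

    static std::atomic<int> spinners{0};
    static const int max_spinners = 4;    // assumed knob value

    // Check-then-increment is not atomic as a whole, so the cap is soft:
    // a few extra spinners can slip in, which is harmless for a heuristic.
    static bool try_enter_spin() {
      if (spinners.load(std::memory_order_relaxed) > max_spinners) {
        return false;                     // enough spinners already
      }
      spinners.fetch_add(1, std::memory_order_relaxed);
      return true;
    }

    static void exit_spin() {
      spinners.fetch_sub(1, std::memory_order_relaxed);
    }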
*** 1954,1964 ****
  // this thread, if safe, doesn't steal cycles from GC.
  // This is in keeping with the "no loitering in runtime" rule.
  // We periodically check to see if there's a safepoint pending.

  if ((ctr & 0xFF) == 0) {
    if (SafepointMechanism::poll(Self)) {
-     TEVENT(Spin: safepoint);
      goto Abort;           // abrupt spin egress
    }
    if (Knob_UsePause & 1) SpinPause();
  }
--- 1919,1928 ----
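The (ctr & 0xFF) == 0 test above is the usual trick for amortizing an expensive check inside a hot spin: the mask comparison costs one AND and one branch per iteration, while the real safepoint poll runs only once every 256 decrements. A trivial illustration; expensive_poll() is a stand-in, not a HotSpot call:

    // Poll an expensive condition once per 256 iterations of a hot loop.
    static bool expensive_poll() { return false; }   // stand-in for the real poll

    static void spin(int ctr) {
      for (; ctr > 0; ctr--) {
        if ((ctr & 0xFF) == 0) {          // true when the low 8 bits are zero
          if (expensive_poll()) return;   // bail out promptly when requested
        }
        // ... one spin attempt per iteration ...
      }
    }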
*** 2027,2046 ****
        // * penalize: ctr -= Knob_CASPenalty
        // * exit spin with prejudice -- goto Abort;
        // * exit spin without prejudice.
        // * Since CAS is high-latency, retry again immediately.
        prv = ox;
-       TEVENT(Spin: cas failed);
        if (caspty == -2) break;
        if (caspty == -1) goto Abort;
        ctr -= caspty;
        continue;
      }

      // Did lock ownership change hands ?
      if (ox != prv && prv != NULL) {
-       TEVENT(spin: Owner changed)
        if (oxpty == -2) break;
        if (oxpty == -1) goto Abort;
        ctr -= oxpty;
      }
      prv = ox;
--- 1991,2008 ----
*** 2048,2067 ****
      // Abort the spin if the owner is not executing.
      // The owner must be executing in order to drop the lock.
      // Spinning while the owner is OFFPROC is idiocy.
      // Consider: ctr -= RunnablePenalty ;
      if (Knob_OState && NotRunnable (Self, ox)) {
-       TEVENT(Spin abort - notrunnable);
        goto Abort;
      }
      if (sss && _succ == NULL) _succ = Self;
    }

    // Spin failed with prejudice -- reduce _SpinDuration.
    // TODO: Use an AIMD-like policy to adjust _SpinDuration.
    // AIMD is globally stable.
-   TEVENT(Spin failure);
    {
      int x = _SpinDuration;
      if (x > 0) {
        // Consider an AIMD scheme like: x -= (x >> 3) + 100
        // This is globally sample and tends to damp the response.
--- 2010,2027 ----
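The epilogue in this hunk adapts _SpinDuration downward on a failed spin; the in-tree comment proposes the AIMD shape x -= (x >> 3) + 100, i.e. a roughly 12.5% multiplicative cut plus a constant, so repeated failures collapse the spin budget quickly while isolated failures only damp it. A hedged sketch of that policy in isolation; the increase-on-success term and starting value are illustrative assumptions:

    static int spin_duration = 5000;      // assumed starting budget

    static void on_spin_success() {
      spin_duration += 100;               // additive increase: grow slowly
    }

    static void on_spin_failure() {
      int x = spin_duration;
      if (x > 0) {
        x -= (x >> 3) + 100;              // multiplicative decrease: ~12.5% + 100
        if (x < 0) x = 0;                 // keep the budget non-negative
        spin_duration = x;
      }
    }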