1 /*
2 * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
448 // section. Amdahl's parallel speedup law comes vividly into play.
449 //
450 // Another option might be to aggregate the events (thread local or
451 // per-monitor aggregation) and defer reporting until a more opportune
452 // time -- such as next time some thread encounters contention but has
453 // yet to acquire the lock. While spinning, that thread could
454 // increment JVMStat counters, etc.
455
456 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
457 if (JvmtiExport::should_post_monitor_contended_entered()) {
458 JvmtiExport::post_monitor_contended_entered(jt, this);
459
460 // The current thread already owns the monitor and is not going to
461 // call park() for the remainder of the monitor enter protocol. So
462 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
463 // event handler consumed an unpark() issued by the thread that
464 // just exited the monitor.
465 }
466
467 if (event.should_commit()) {
468 event.set_klass(((oop)this->object())->klass());
469 event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
470 event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
471 event.commit();
472 }
473
474 if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
475 ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
476 }
477 }
478
479
480 // Caveat: TryLock() is not necessarily serializing if it returns failure.
481 // Callers must compensate as needed.
482
483 int ObjectMonitor::TryLock (Thread * Self) {
484 for (;;) {
485 void * own = _owner ;
486 if (own != NULL) return 0 ;
487 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
488 // Either guarantee _recursions == 0 or set _recursions = 0.
489 assert (_recursions == 0, "invariant") ;
977 return;
978 }
979 }
980
981 if (_recursions != 0) {
982 _recursions--; // this is simple recursive enter
983 TEVENT (Inflated exit - recursive) ;
984 return ;
985 }
986
987 // Invariant: after setting Responsible=null a thread must execute
988 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
989 if ((SyncFlags & 4) == 0) {
990 _Responsible = NULL ;
991 }
992
993 #if INCLUDE_TRACE
994 // get the owner's thread id for the MonitorEnter event
995 // if it is enabled and the thread isn't suspended
996 if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
997 _previous_owner_tid = SharedRuntime::get_java_tid(Self);
998 }
999 #endif
1000
1001 for (;;) {
1002 assert (THREAD == _owner, "invariant") ;
1003
1004
1005 if (Knob_ExitPolicy == 0) {
1006 // release semantics: prior loads and stores from within the critical section
1007 // must not float (reorder) past the following store that drops the lock.
1008 // On SPARC that requires MEMBAR #loadstore|#storestore.
1009 // But of course in TSO #loadstore|#storestore is not required.
1010 // I'd like to write one of the following:
1011 // A. OrderAccess::release() ; _owner = NULL
1012 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
1013 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
1014 // store into a _dummy variable. That store is not needed, but can result
1015 // in massive wasteful coherency traffic on classic SMP systems.
1016 // Instead, I use release_store(), which is implemented as just a simple
1017 // ST on x64, x86 and SPARC.
1430 // check_slow() is a misnomer. It's called simply to throw an IMSX exception.
1431 // TODO-FIXME: remove check_slow() -- it's likely dead.
1432
// Slow path taken when the calling thread has been found NOT to own this
// monitor: records a trace event and throws IllegalMonitorStateException.
// TRAPS marks this as a VM entry that can raise a pending exception.
void ObjectMonitor::check_slow(TRAPS) {
  TEVENT (check_slow - throw IMSX) ;
  // Precondition: the ownership test already failed -- the current thread is
  // neither the direct owner nor the holder of a stack-lock covering _owner.
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}
1438
1439 static int Adjust (volatile int * adr, int dx) {
1440 int v ;
1441 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1442 return v ;
1443 }
1444
1445 // helper method for posting a monitor wait event
1446 void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
1447 jlong notifier_tid,
1448 jlong timeout,
1449 bool timedout) {
1450 event->set_klass(((oop)this->object())->klass());
1451 event->set_timeout((TYPE_ULONG)timeout);
1452 event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
1453 event->set_notifier((TYPE_OSTHREAD)notifier_tid);
1454 event->set_timedOut((TYPE_BOOLEAN)timedout);
1455 event->commit();
1456 }
1457
1458 // -----------------------------------------------------------------------------
1459 // Wait/Notify/NotifyAll
1460 //
1461 // Note: a subset of changes to ObjectMonitor::wait()
1462 // will need to be replicated in complete_exit above
1463 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1464 Thread * const Self = THREAD ;
1465 assert(Self->is_Java_thread(), "Must be Java thread!");
1466 JavaThread *jt = (JavaThread *)THREAD;
1467
1468 DeferredInitialize () ;
1469
1470 // Throw IMSX or IEX.
1471 CHECK_OWNER();
1472
1473 EventJavaMonitorWait event;
1474
1699 CHECK_OWNER();
1700 if (_WaitSet == NULL) {
1701 TEVENT (Empty-Notify) ;
1702 return ;
1703 }
1704 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1705
1706 int Policy = Knob_MoveNotifyee ;
1707
1708 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1709 ObjectWaiter * iterator = DequeueWaiter() ;
1710 if (iterator != NULL) {
1711 TEVENT (Notify1 - Transfer) ;
1712 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1713 guarantee (iterator->_notified == 0, "invariant") ;
1714 if (Policy != 4) {
1715 iterator->TState = ObjectWaiter::TS_ENTER ;
1716 }
1717 iterator->_notified = 1 ;
1718 Thread * Self = THREAD;
1719 iterator->_notifier_tid = Self->osthread()->thread_id();
1720
1721 ObjectWaiter * List = _EntryList ;
1722 if (List != NULL) {
1723 assert (List->_prev == NULL, "invariant") ;
1724 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1725 assert (List != iterator, "invariant") ;
1726 }
1727
1728 if (Policy == 0) { // prepend to EntryList
1729 if (List == NULL) {
1730 iterator->_next = iterator->_prev = NULL ;
1731 _EntryList = iterator ;
1732 } else {
1733 List->_prev = iterator ;
1734 iterator->_next = List ;
1735 iterator->_prev = NULL ;
1736 _EntryList = iterator ;
1737 }
1738 } else
1739 if (Policy == 1) { // append to EntryList
1825
1826 int Policy = Knob_MoveNotifyee ;
1827 int Tally = 0 ;
1828 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1829
1830 for (;;) {
1831 iterator = DequeueWaiter () ;
1832 if (iterator == NULL) break ;
1833 TEVENT (NotifyAll - Transfer1) ;
1834 ++Tally ;
1835
1836 // Disposition - what might we do with iterator ?
1837 // a. add it directly to the EntryList - either tail or head.
1838 // b. push it onto the front of the _cxq.
1839 // For now we use (a).
1840
1841 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1842 guarantee (iterator->_notified == 0, "invariant") ;
1843 iterator->_notified = 1 ;
1844 Thread * Self = THREAD;
1845 iterator->_notifier_tid = Self->osthread()->thread_id();
1846 if (Policy != 4) {
1847 iterator->TState = ObjectWaiter::TS_ENTER ;
1848 }
1849
1850 ObjectWaiter * List = _EntryList ;
1851 if (List != NULL) {
1852 assert (List->_prev == NULL, "invariant") ;
1853 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1854 assert (List != iterator, "invariant") ;
1855 }
1856
1857 if (Policy == 0) { // prepend to EntryList
1858 if (List == NULL) {
1859 iterator->_next = iterator->_prev = NULL ;
1860 _EntryList = iterator ;
1861 } else {
1862 List->_prev = iterator ;
1863 iterator->_next = List ;
1864 iterator->_prev = NULL ;
1865 _EntryList = iterator ;
|
1 /*
2 * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
448 // section. Amdahl's parallel speedup law comes vividly into play.
449 //
450 // Another option might be to aggregate the events (thread local or
451 // per-monitor aggregation) and defer reporting until a more opportune
452 // time -- such as next time some thread encounters contention but has
453 // yet to acquire the lock. While spinning, that thread could
454 // increment JVMStat counters, etc.
455
456 DTRACE_MONITOR_PROBE(contended__entered, this, object(), jt);
457 if (JvmtiExport::should_post_monitor_contended_entered()) {
458 JvmtiExport::post_monitor_contended_entered(jt, this);
459
460 // The current thread already owns the monitor and is not going to
461 // call park() for the remainder of the monitor enter protocol. So
462 // it doesn't matter if the JVMTI_EVENT_MONITOR_CONTENDED_ENTERED
463 // event handler consumed an unpark() issued by the thread that
464 // just exited the monitor.
465 }
466
467 if (event.should_commit()) {
468 event.set_monitorClass(((oop)this->object())->klass());
469 event.set_previousOwner((TYPE_THREAD)_previous_owner_tid);
470 event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
471 event.commit();
472 }
473
474 if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
475 ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
476 }
477 }
478
479
480 // Caveat: TryLock() is not necessarily serializing if it returns failure.
481 // Callers must compensate as needed.
482
483 int ObjectMonitor::TryLock (Thread * Self) {
484 for (;;) {
485 void * own = _owner ;
486 if (own != NULL) return 0 ;
487 if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
488 // Either guarantee _recursions == 0 or set _recursions = 0.
489 assert (_recursions == 0, "invariant") ;
977 return;
978 }
979 }
980
981 if (_recursions != 0) {
982 _recursions--; // this is simple recursive enter
983 TEVENT (Inflated exit - recursive) ;
984 return ;
985 }
986
987 // Invariant: after setting Responsible=null a thread must execute
988 // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
989 if ((SyncFlags & 4) == 0) {
990 _Responsible = NULL ;
991 }
992
993 #if INCLUDE_TRACE
994 // get the owner's thread id for the MonitorEnter event
995 // if it is enabled and the thread isn't suspended
996 if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
997 _previous_owner_tid = THREAD_TRACE_ID(Self);
998 }
999 #endif
1000
1001 for (;;) {
1002 assert (THREAD == _owner, "invariant") ;
1003
1004
1005 if (Knob_ExitPolicy == 0) {
1006 // release semantics: prior loads and stores from within the critical section
1007 // must not float (reorder) past the following store that drops the lock.
1008 // On SPARC that requires MEMBAR #loadstore|#storestore.
1009 // But of course in TSO #loadstore|#storestore is not required.
1010 // I'd like to write one of the following:
1011 // A. OrderAccess::release() ; _owner = NULL
1012 // B. OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
1013 // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
1014 // store into a _dummy variable. That store is not needed, but can result
1015 // in massive wasteful coherency traffic on classic SMP systems.
1016 // Instead, I use release_store(), which is implemented as just a simple
1017 // ST on x64, x86 and SPARC.
1430 // check_slow() is a misnomer. It's called simply to throw an IMSX exception.
1431 // TODO-FIXME: remove check_slow() -- it's likely dead.
1432
// Error path: invoked once ownership checking has failed, purely to record a
// trace event and throw IllegalMonitorStateException back to the caller.
// TRAPS marks this as a VM entry that can leave a pending exception.
void ObjectMonitor::check_slow(TRAPS) {
  TEVENT (check_slow - throw IMSX) ;
  // The current thread must be neither the direct owner nor the holder of a
  // stack-lock standing in for the owner; this function never succeeds.
  assert(THREAD != _owner && !THREAD->is_lock_owned((address) _owner), "must not be owner");
  THROW_MSG(vmSymbols::java_lang_IllegalMonitorStateException(), "current thread not owner");
}
1438
1439 static int Adjust (volatile int * adr, int dx) {
1440 int v ;
1441 for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
1442 return v ;
1443 }
1444
// helper method for posting a monitor wait event
//
// event        - the JavaMonitorWait trace event to complete and emit;
//                must be non-NULL (asserted below)
// notifier_tid - trace id of the notifying thread (the notify paths record
//                THREAD_TRACE_ID(Self)), or 0 when no notify occurred
// timeout      - the requested wait timeout (millis, per ObjectMonitor::wait)
// timedout     - true iff the wait terminated because the timeout elapsed
void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
                                            jlong notifier_tid,
                                            jlong timeout,
                                            bool timedout) {
  assert(event != NULL, "invariant");
  // Record the monitored object's class, the raw timeout, and the monitor's
  // identity (address of its object reference field), then emit the event.
  event->set_monitorClass(((oop)this->object())->klass());
  event->set_timeout(timeout);
  event->set_address((TYPE_ADDRESS)this->object_addr());
  event->set_notifier(notifier_tid);
  event->set_timedOut(timedout);
  event->commit();
}
1458
1459 // -----------------------------------------------------------------------------
1460 // Wait/Notify/NotifyAll
1461 //
1462 // Note: a subset of changes to ObjectMonitor::wait()
1463 // will need to be replicated in complete_exit above
1464 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
1465 Thread * const Self = THREAD ;
1466 assert(Self->is_Java_thread(), "Must be Java thread!");
1467 JavaThread *jt = (JavaThread *)THREAD;
1468
1469 DeferredInitialize () ;
1470
1471 // Throw IMSX or IEX.
1472 CHECK_OWNER();
1473
1474 EventJavaMonitorWait event;
1475
1700 CHECK_OWNER();
1701 if (_WaitSet == NULL) {
1702 TEVENT (Empty-Notify) ;
1703 return ;
1704 }
1705 DTRACE_MONITOR_PROBE(notify, this, object(), THREAD);
1706
1707 int Policy = Knob_MoveNotifyee ;
1708
1709 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notify") ;
1710 ObjectWaiter * iterator = DequeueWaiter() ;
1711 if (iterator != NULL) {
1712 TEVENT (Notify1 - Transfer) ;
1713 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1714 guarantee (iterator->_notified == 0, "invariant") ;
1715 if (Policy != 4) {
1716 iterator->TState = ObjectWaiter::TS_ENTER ;
1717 }
1718 iterator->_notified = 1 ;
1719 Thread * Self = THREAD;
1720 iterator->_notifier_tid = THREAD_TRACE_ID(Self);
1721
1722 ObjectWaiter * List = _EntryList ;
1723 if (List != NULL) {
1724 assert (List->_prev == NULL, "invariant") ;
1725 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1726 assert (List != iterator, "invariant") ;
1727 }
1728
1729 if (Policy == 0) { // prepend to EntryList
1730 if (List == NULL) {
1731 iterator->_next = iterator->_prev = NULL ;
1732 _EntryList = iterator ;
1733 } else {
1734 List->_prev = iterator ;
1735 iterator->_next = List ;
1736 iterator->_prev = NULL ;
1737 _EntryList = iterator ;
1738 }
1739 } else
1740 if (Policy == 1) { // append to EntryList
1826
1827 int Policy = Knob_MoveNotifyee ;
1828 int Tally = 0 ;
1829 Thread::SpinAcquire (&_WaitSetLock, "WaitSet - notifyall") ;
1830
1831 for (;;) {
1832 iterator = DequeueWaiter () ;
1833 if (iterator == NULL) break ;
1834 TEVENT (NotifyAll - Transfer1) ;
1835 ++Tally ;
1836
1837 // Disposition - what might we do with iterator ?
1838 // a. add it directly to the EntryList - either tail or head.
1839 // b. push it onto the front of the _cxq.
1840 // For now we use (a).
1841
1842 guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
1843 guarantee (iterator->_notified == 0, "invariant") ;
1844 iterator->_notified = 1 ;
1845 Thread * Self = THREAD;
1846 iterator->_notifier_tid = THREAD_TRACE_ID(Self);
1847 if (Policy != 4) {
1848 iterator->TState = ObjectWaiter::TS_ENTER ;
1849 }
1850
1851 ObjectWaiter * List = _EntryList ;
1852 if (List != NULL) {
1853 assert (List->_prev == NULL, "invariant") ;
1854 assert (List->TState == ObjectWaiter::TS_ENTER, "invariant") ;
1855 assert (List != iterator, "invariant") ;
1856 }
1857
1858 if (Policy == 0) { // prepend to EntryList
1859 if (List == NULL) {
1860 iterator->_next = iterator->_prev = NULL ;
1861 _EntryList = iterator ;
1862 } else {
1863 List->_prev = iterator ;
1864 iterator->_next = List ;
1865 iterator->_prev = NULL ;
1866 _EntryList = iterator ;
|