
src/hotspot/share/runtime/objectMonitor.cpp

rev 51814 : imported patch syncknobs-00-base
rev 51815 : imported patch syncknobs-01-Knob_ReportSettings
rev 51816 : imported patch syncknobs-02-Knob_SpinBackOff
rev 51817 : imported patch syncknobs-03-BackOffMask
rev 51818 : imported patch syncknobs-04-Knob_ExitRelease
rev 51820 : imported patch syncknobs-05-Knob_InlineNotify
rev 51821 : imported patch syncknobs-06-Knob_Verbose
rev 51822 : imported patch syncknobs-07-Knob_VerifyInUse
rev 51824 : imported patch syncknobs-08-Knob_VerifyMatch
rev 51825 : imported patch syncknobs-09-Knob_SpinBase
rev 51826 : imported patch syncknobs-10-Knob_CASPenalty
rev 51827 : imported patch syncknobs-11-Knob_OXPenalty
rev 51828 : imported patch syncknobs-12-Knob_SpinSetSucc
rev 51829 : imported patch syncknobs-13-Knob_SpinEarly
rev 51830 : imported patch syncknobs-14-Knob_SuccEnabled
rev 51831 : imported patch syncknobs-15-Knob_SuccRestrict
rev 51832 : imported patch syncknobs-16-Knob_MaxSpinners
rev 51833 : imported patch syncknobs-17-Knob_SpinAfterFutile
rev 51834 : imported patch syncknobs-18-Knob_OState
rev 51835 : imported patch syncknobs-19-Knob_UsePause
rev 51836 : imported patch syncknobs-20-Knob_ExitPolicy
rev 51837 : [mq]: syncknobs-20.2-Knob_ExitPolicy


 894 
 895   // Invariant: after setting Responsible=null a thread must execute
 896   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 897   _Responsible = NULL;
 898 
 899 #if INCLUDE_JFR
 900   // get the owner's thread id for the MonitorEnter event
 901   // if it is enabled and the thread isn't suspended
 902   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 903     _previous_owner_tid = JFR_THREAD_ID(Self);
 904   }
 905 #endif
 906 
 907   for (;;) {
 908     assert(THREAD == _owner, "invariant");
 909 
 910     // release semantics: prior loads and stores from within the critical section
 911     // must not float (reorder) past the following store that drops the lock.
 912     // On SPARC that requires MEMBAR #loadstore|#storestore.
 913     // But of course in TSO #loadstore|#storestore is not required.
 914     // I'd like to write one of the following:
 915     // A.  OrderAccess::release() ; _owner = NULL
 916     // B.  OrderAccess::loadstore(); OrderAccess::storestore(); _owner = NULL;
 917     // Unfortunately OrderAccess::release() and OrderAccess::loadstore() both
 918     // store into a _dummy variable.  That store is not needed, but can result
 919     // in massive wasteful coherency traffic on classic SMP systems.
 920     // Instead, I use release_store(), which is implemented as just a simple
 921     // ST on x64, x86 and SPARC.
 922     OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
 923     OrderAccess::storeload();                        // See if we need to wake a successor
 924     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 925       return;
 926     }
 927     // Other threads are blocked trying to acquire the lock.
 928 
 929     // Normally the exiting thread is responsible for ensuring succession,
 930     // but if other successors are ready or other entering threads are spinning
 931     // then this thread can simply store NULL into _owner and exit without
 932     // waking a successor.  The existence of spinners or ready successors
 933     // guarantees proper succession (liveness).  Responsibility passes to the
 934     // ready or running successors.  The exiting thread delegates the duty.
 935     // More precisely, if a successor already exists this thread is absolved
 936     // of the responsibility of waking (unparking) one.
 937     //
 938     // The _succ variable is critical to reducing futile wakeup frequency.
 939     // _succ identifies the "heir presumptive" thread that has been made
 940     // ready (unparked) but that has not yet run.  We need only one such
 941     // successor thread to guarantee progress.
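The release_store()/storeload() pairing above maps onto C++11 atomics: a
release store drops the lock, then a full StoreLoad fence orders that store
before the re-read of the wait queues. A minimal sketch, assuming stand-in
fields (owner, entry_list, cxq, succ) and std::atomic rather than HotSpot's
OrderAccess API:

  #include <atomic>

  // Hypothetical stand-ins for the monitor fields referenced above.
  std::atomic<void*> owner{nullptr};       // _owner
  std::atomic<void*> entry_list{nullptr};  // _EntryList
  std::atomic<void*> cxq{nullptr};         // _cxq
  std::atomic<void*> succ{nullptr};        // _succ

  void exit_fast_path() {
    // Release store: loads and stores from the critical section may not
    // float past the store that drops the lock, and no dummy-variable
    // store (as a separate release() fence would emit) is needed.
    owner.store(nullptr, std::memory_order_release);

    // StoreLoad barrier: the NULL store must be globally visible before
    // the queues are re-read, or a newly arrived waiter could be missed.
    std::atomic_thread_fence(std::memory_order_seq_cst);

    if ((entry_list.load() == nullptr && cxq.load() == nullptr) ||
        succ.load() != nullptr) {
      return;  // nobody is waiting, or a successor is already ready
    }
    // ... otherwise fall through to the slow path that wakes a successor.
  }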




 894 
 895   // Invariant: after setting Responsible=null a thread must execute
 896   // a MEMBAR or other serializing instruction before fetching EntryList|cxq.
 897   _Responsible = NULL;
 898 
 899 #if INCLUDE_JFR
 900   // get the owner's thread id for the MonitorEnter event
 901   // if it is enabled and the thread isn't suspended
 902   if (not_suspended && EventJavaMonitorEnter::is_enabled()) {
 903     _previous_owner_tid = JFR_THREAD_ID(Self);
 904   }
 905 #endif
 906 
 907   for (;;) {
 908     assert(THREAD == _owner, "invariant");
 909 
 910     // release semantics: prior loads and stores from within the critical section
 911     // must not float (reorder) past the following store that drops the lock.
 912     // On SPARC that requires MEMBAR #loadstore|#storestore.
 913     // But of course in TSO #loadstore|#storestore is not required.








 914     OrderAccess::release_store(&_owner, (void*)NULL);   // drop the lock
 915     OrderAccess::storeload();                        // See if we need to wake a successor
 916     if ((intptr_t(_EntryList)|intptr_t(_cxq)) == 0 || _succ != NULL) {
 917       return;
 918     }
 919     // Other threads are blocked trying to acquire the lock.
 920 
 921     // Normally the exiting thread is responsible for ensuring succession,
 922     // but if other successors are ready or other entering threads are spinning
 923     // then this thread can simply store NULL into _owner and exit without
 924     // waking a successor.  The existence of spinners or ready successors
 925     // guarantees proper succession (liveness).  Responsibility passes to the
 926     // ready or running successors.  The exiting thread delegates the duty.
 927     // More precisely, if a successor already exists this thread is absolved
 928     // of the responsibility of waking (unparking) one.
 929     //
 930     // The _succ variable is critical to reducing futile wakeup frequency.
 931     // _succ identifies the "heir presumptive" thread that has been made
 932     // ready (unparked) but that has not yet run.  We need only one such
 933     // successor thread to guarantee progress.
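The _succ ("heir presumptive") discussion above describes duty delegation:
an exiting thread must unpark a waiter only when no ready successor already
exists. A minimal sketch of that protocol, assuming hypothetical helpers
(pick_waiter, unpark) that are stubs here, not HotSpot APIs:

  #include <atomic>

  struct Thread {};                    // hypothetical stand-in
  std::atomic<Thread*> succ{nullptr};  // _succ: the "heir presumptive"

  Thread* pick_waiter() { return nullptr; }  // stub: real code pops EntryList/cxq
  void unpark(Thread*) {}                    // stub: make the thread runnable

  void ensure_succession() {
    if (succ.load() != nullptr) {
      // An heir presumptive is already ready (unparked) but has not yet
      // run; its existence guarantees progress, so this thread is
      // absolved of the duty to wake anyone.
      return;
    }
    Thread* heir = pick_waiter();
    if (heir != nullptr) {
      succ.store(heir);  // publish the successor first...
      unpark(heir);      // ...then unpark it; one ready successor suffices
    }
  }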

