
src/hotspot/share/runtime/mutex.cpp

rev 48406 : 8194406: Use Atomic::replace_if_null
Reviewed-by: coleenp, dholmes
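
For context, Atomic::replace_if_null encapsulates the CAS-against-NULL idiom
that this change removes from the call sites: install a value at a location
only if the location currently holds NULL, and report whether the install
happened. A minimal stand-alone sketch of that contract, using std::atomic
rather than the JDK's templated Atomic layer (the real helper lives in
share/runtime/atomic.hpp and is implemented via Atomic::cmpxchg):

  #include <atomic>

  // Sketch only, not the JDK implementation: atomically store 'value' into
  // '*dest' iff '*dest' is currently NULL; return true iff we stored it.
  template<typename T>
  bool replace_if_null(T* value, std::atomic<T*>* dest) {
    T* expected = nullptr;  // the exchange succeeds only against NULL
    return dest->compare_exchange_strong(expected, value);
  }

Under that contract the two call sites below rewrite mechanically:
Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL at line 470
becomes Atomic::replace_if_null(ESelf, &_OnDeck), and the != NULL test at
line 577 becomes a negated replace_if_null call.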


--- old/src/hotspot/share/runtime/mutex.cpp

 450   if (TryFast()) {
 451  Exeunt:
 452     assert(ILocked(), "invariant");
 453     return;
 454   }
 455 
 456   ParkEvent * const ESelf = Self->_MutexEvent;
 457   assert(_OnDeck != ESelf, "invariant");
 458 
 459   // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT.
 460   // Synchronizer.cpp uses a similar optimization.
 461   if (TrySpin(Self)) goto Exeunt;
 462 
 463   // Slow-path - the lock is contended.
 464   // Either Enqueue Self on cxq or acquire the outer lock.
 465   // LockWord encoding = (cxq,LOCKBYTE)
 466   ESelf->reset();
 467   OrderAccess::fence();
 468 
 469   // Optional optimization ... try barging on the inner lock
 470   if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
 471     goto OnDeck_LOOP;
 472   }
 473 
 474   if (AcquireOrPush(ESelf)) goto Exeunt;
 475 
 476   // At any given time there is at most one ondeck thread.
 477   // ondeck implies not resident on cxq and not resident on EntryList
 478   // Only the OnDeck thread can try to acquire -- contend for -- the lock.
 479   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
 480   // Deschedule Self so that others may run.
 481   while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
 482     ParkCommon(ESelf, 0);
 483   }
 484 
 485   // Self is now in the OnDeck position and will remain so until it
 486   // manages to acquire the lock.
 487  OnDeck_LOOP:
 488   for (;;) {
 489     assert(_OnDeck == ESelf, "invariant");
 490     if (TrySpin(Self)) break;


 557   if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
 558     return;      // normal fast-path exit - cxq and EntryList both empty
 559   }
 560   if (cxq & _LBIT) {
 561     // Optional optimization ...
 562     // Some other thread acquired the lock in the window since this
 563     // thread released it.  Succession is now that thread's responsibility.
 564     return;
 565   }
 566 
 567  Succession:
 568   // Slow-path exit - this thread must ensure succession and progress.
 569   // OnDeck serves as lock to protect cxq and EntryList.
 570   // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
 571   // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
 572   // but only one concurrent consumer (detacher of RATs).
 573   // Consider protecting this critical section with schedctl on Solaris.
 574   // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
 575   // picks a successor and marks that thread as OnDeck.  That successor
 576   // thread will then clear OnDeck once it eventually acquires the outer lock.
 577   if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
 578     return;
 579   }
 580 
 581   ParkEvent * List = _EntryList;
 582   if (List != NULL) {
 583     // Transfer the head of the EntryList to the OnDeck position.
 584     // Once OnDeck, a thread stays OnDeck until it acquires the lock.
 585     // For a given lock there is at most one OnDeck thread at any one instant.
 586    WakeOne:
 587     assert(List == _EntryList, "invariant");
 588     ParkEvent * const w = List;
 589     assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
 590     _EntryList = w->ListNext;
 591     // as a diagnostic measure consider setting w->ListNext = BAD
 592     assert(intptr_t(_OnDeck) == _LBIT, "invariant");
 593 
 594     // Pass OnDeck role to w, ensuring that _EntryList has been set first.
 595     // w will clear _OnDeck once it acquires the outer lock.
 596     // Note that once we set _OnDeck that thread can acquire the mutex, proceed
 597     // with its critical section and then enter this code to unlock the mutex. So




+++ new/src/hotspot/share/runtime/mutex.cpp

 450   if (TryFast()) {
 451  Exeunt:
 452     assert(ILocked(), "invariant");
 453     return;
 454   }
 455 
 456   ParkEvent * const ESelf = Self->_MutexEvent;
 457   assert(_OnDeck != ESelf, "invariant");
 458 
 459   // As an optimization, spinners could conditionally try to set _OnDeck to _LBIT.
 460   // Synchronizer.cpp uses a similar optimization.
 461   if (TrySpin(Self)) goto Exeunt;
 462 
 463   // Slow-path - the lock is contended.
 464   // Either Enqueue Self on cxq or acquire the outer lock.
 465   // LockWord encoding = (cxq,LOCKBYTE)
 466   ESelf->reset();
 467   OrderAccess::fence();
 468 
 469   // Optional optimization ... try barging on the inner lock
 470   if ((NativeMonitorFlags & 32) && Atomic::replace_if_null(ESelf, &_OnDeck)) {
 471     goto OnDeck_LOOP;
 472   }
 473 
 474   if (AcquireOrPush(ESelf)) goto Exeunt;
 475 
 476   // At any given time there is at most one ondeck thread.
 477   // ondeck implies not resident on cxq and not resident on EntryList
 478   // Only the OnDeck thread can try to acquire -- contend for -- the lock.
 479   // CONSIDER: use Self->OnDeck instead of m->OnDeck.
 480   // Deschedule Self so that others may run.
 481   while (OrderAccess::load_acquire(&_OnDeck) != ESelf) {
 482     ParkCommon(ESelf, 0);
 483   }
 484 
 485   // Self is now in the OnDeck position and will remain so until it
 486   // manages to acquire the lock.
 487  OnDeck_LOOP:
 488   for (;;) {
 489     assert(_OnDeck == ESelf, "invariant");
 490     if (TrySpin(Self)) break;
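
The acquire slow path above turns on the packed lock word described at line
465: the low "lock byte" is the lock itself and the remaining bits hold cxq,
a CAS-based stack of waiting ParkEvents. AcquireOrPush is not shown in this
hunk; the following toy model, reconstructed from those comments (the
single-bit _LBIT and the std::atomic lock word are illustrative
simplifications), shows the either-acquire-or-enqueue loop:

  #include <atomic>
  #include <cstdint>

  struct ParkEvent { ParkEvent* ListNext; };

  static const intptr_t _LBIT = 1;             // illustrative lock-byte bit
  static std::atomic<intptr_t> LockWord{0};    // encoding = (cxq, LOCKBYTE)

  // Toy model of AcquireOrPush: either set the lock byte, or push ESelf onto
  // the cxq stack while preserving the lock byte. Returns true if the lock
  // was acquired outright, false if ESelf was enqueued on cxq.
  bool AcquireOrPush(ParkEvent* ESelf) {
    intptr_t v = LockWord.load();
    for (;;) {
      if ((v & _LBIT) == 0) {
        // Lock byte clear: try to take the lock directly.
        if (LockWord.compare_exchange_weak(v, v | _LBIT)) return true;
      } else {
        // Lock held: splice ESelf in front of the current cxq head.
        ESelf->ListNext = (ParkEvent*)(v & ~_LBIT);
        if (LockWord.compare_exchange_weak(v, intptr_t(ESelf) | _LBIT)) return false;
      }
      // On CAS failure compare_exchange_weak refreshed 'v'; retry.
    }
  }

A thread that was enqueued then parks until the releasing thread publishes it
as _OnDeck, which is exactly the load_acquire loop at lines 481-483.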


 557   if (((cxq & ~_LBIT)|UNS(_EntryList)) == 0) {
 558     return;      // normal fast-path exit - cxq and EntryList both empty
 559   }
 560   if (cxq & _LBIT) {
 561     // Optional optimization ...
 562     // Some other thread acquired the lock in the window since this
 563     // thread released it.  Succession is now that thread's responsibility.
 564     return;
 565   }
 566 
 567  Succession:
 568   // Slow-path exit - this thread must ensure succession and progress.
 569   // OnDeck serves as lock to protect cxq and EntryList.
 570   // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq.
 571   // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS)
 572   // but only one concurrent consumer (detacher of RATs).
 573   // Consider protecting this critical section with schedctl on Solaris.
 574   // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
 575   // picks a successor and marks that thread as OnDeck.  That successor
 576   // thread will then clear OnDeck once it eventually acquires the outer lock.
 577   if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
 578     return;
 579   }
 580 
 581   ParkEvent * List = _EntryList;
 582   if (List != NULL) {
 583     // Transfer the head of the EntryList to the OnDeck position.
 584     // Once OnDeck, a thread stays OnDeck until it acquires the lock.
 585     // For a given lock there is at most one OnDeck thread at any one instant.
 586    WakeOne:
 587     assert(List == _EntryList, "invariant");
 588     ParkEvent * const w = List;
 589     assert(RelaxAssert || w != Thread::current()->_MutexEvent, "invariant");
 590     _EntryList = w->ListNext;
 591     // as a diagnostic measure consider setting w->ListNext = BAD
 592     assert(intptr_t(_OnDeck) == _LBIT, "invariant");
 593 
 594     // Pass OnDeck role to w, ensuring that _EntryList has been set first.
 595     // w will clear _OnDeck once it acquires the outer lock.
 596     // Note that once we set _OnDeck that thread can acquire the mutex, proceed
 597     // with its critical section and then enter this code to unlock the mutex. So
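
This second hunk is where the patch's other replace_if_null lands: the exiting
thread first checks the fast paths (both cxq and EntryList empty, or another
thread already re-acquired the lock and inherits succession), then claims the
inner OnDeck lock by installing the _LBIT sentinel, pops the EntryList head,
and hands it the OnDeck role. A condensed sketch of just that handoff, with
the unpark call and the cxq-detach path elided (names mirror the excerpt; this
is a model of the shown lines, not the surrounding mutex.cpp verbatim):

  #include <atomic>
  #include <cstdint>

  struct ParkEvent { ParkEvent* ListNext; };

  static const intptr_t _LBIT = 1;                  // sentinel: OnDeck "locked"
  static std::atomic<ParkEvent*> _OnDeck{nullptr};  // inner lock / successor slot
  static ParkEvent* _EntryList = nullptr;           // touched only while holding _OnDeck

  void EnsureSuccession() {
    // CAS _OnDeck from NULL to _LBIT (mirrors line 577): if it fails, some
    // other thread already owns succession and we are done.
    ParkEvent* expected = nullptr;
    if (!_OnDeck.compare_exchange_strong(expected, (ParkEvent*)_LBIT)) return;

    ParkEvent* w = _EntryList;
    if (w != nullptr) {
      _EntryList = w->ListNext;   // detach the head (mirrors line 590)
      // The release store pairs with the load_acquire in the park loop at
      // line 481: once w observes itself OnDeck it may contend for the outer
      // lock, and it clears _OnDeck after it finally acquires.
      _OnDeck.store(w, std::memory_order_release);
      // The real code unparks w here so it can re-spin on the lock.
    }
  }

The release/acquire pairing is what lets the successor read a consistent
_EntryList without taking any further lock.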

