
src/hotspot/share/utilities/singleWriterSynchronizer.cpp

  68   } while (old != value);
  69   // Critical sections entered before we changed the polarity will use
  70   // the old exit counter.  Critical sections entered after the change
  71   // will use the new exit counter.
  72   volatile uint* old_ptr = &_exit[old & 1];
  73   assert(old_ptr != new_ptr, "invariant");
  74   // (3) Inform threads in in-progress critical sections that there is
  75   // a pending synchronize waiting.  The thread that completes the
  76   // request (_exit value == old) will signal the _wakeup semaphore to
  77   // allow us to proceed.
  78   _waiting_for = old;
  79   // Write of _waiting_for must precede read of _exit and associated
  80   // conditional semaphore wait.  If they were re-ordered then a
  81   // critical section exit could miss the wakeup request, failing to
  82   // signal us while we're waiting.
  83   OrderAccess::fence();
  84   // (4) Wait for all the critical sections started before the change
  85   // to complete, e.g. for the value of old_ptr to catch up with old.
  86   // Loop because there could be pending wakeups unrelated to this
  87   // synchronize request.
  88   while (old != OrderAccess::load_acquire(old_ptr)) {
  89     _wakeup.wait();
  90   }
  91   // (5) Drain any pending wakeups. A critical section exit may have
  92   // completed our request and seen our _waiting_for before we checked
  93   // for completion.  There are also possible (though rare) spurious
  94   // wakeup signals in the timing gap between changing the _enter
  95   // polarity and setting _waiting_for.  Enough of any of those could
  96   // lead to semaphore overflow.  This doesn't guarantee no unrelated
  97   // wakeups for the next wait, but prevents unbounded accumulation.
  98   while (_wakeup.trywait()) {}
  99   DEBUG_ONLY(Atomic::dec(&_writers);)
 100 }
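
For context, steps (3) through (5) depend on the reader side of the protocol: the comments say that the critical-section exit which brings the old exit counter up to old signals the _wakeup semaphore. A minimal sketch of such an exit path, inferred from those comments rather than copied from this file (the step of 2 and the Atomic::add call are assumptions), could look like:

// Sketch only -- a reader-side exit consistent with the protocol above.
// Each critical section bumps the exit counter selected by the polarity
// bit of its enter value; readers step by 2 so the polarity bit is left
// to the writer.  The exit that makes the old exit counter equal to
// _waiting_for completes a pending synchronize() and wakes the writer.
void SingleWriterSynchronizer::exit(uint enter_value) {
  uint exit_value = Atomic::add(&_exit[enter_value & 1], 2u);
  // The read of _waiting_for must follow the _exit update, so either the
  // writer's read of _exit (after its fence) sees this exit, or this read
  // sees the writer's _waiting_for and signals it.
  if (exit_value == _waiting_for) {
    _wakeup.signal();
  }
}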


  68   } while (old != value);
  69   // Critical sections entered before we changed the polarity will use
  70   // the old exit counter.  Critical sections entered after the change
  71   // will use the new exit counter.
  72   volatile uint* old_ptr = &_exit[old & 1];
  73   assert(old_ptr != new_ptr, "invariant");
  74   // (3) Inform threads in in-progress critical sections that there is
  75   // a pending synchronize waiting.  The thread that completes the
  76   // request (_exit value == old) will signal the _wakeup semaphore to
  77   // allow us to proceed.
  78   _waiting_for = old;
  79   // Write of _waiting_for must precede read of _exit and associated
  80   // conditional semaphore wait.  If they were re-ordered then a
  81   // critical section exit could miss the wakeup request, failing to
  82   // signal us while we're waiting.
  83   OrderAccess::fence();
  84   // (4) Wait for all the critical sections started before the change
  85   // to complete, e.g. for the value of old_ptr to catch up with old.
  86   // Loop because there could be pending wakeups unrelated to this
  87   // synchronize request.
  88   while (old != Atomic::load_acquire(old_ptr)) {
  89     _wakeup.wait();
  90   }
  91   // (5) Drain any pending wakeups. A critical section exit may have
  92   // completed our request and seen our _waiting_for before we checked
  93   // for completion.  There are also possible (though rare) spurious
  94   // wakeup signals in the timing gap between changing the _enter
  95   // polarity and setting _waiting_for.  Enough of any of those could
  96   // lead to semaphore overflow.  This doesn't guarantee no unrelated
  97   // wakeups for the next wait, but prevents unbounded accumulation.
  98   while (_wakeup.trywait()) {}
  99   DEBUG_ONLY(Atomic::dec(&_writers);)
 100 }
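
The same technique can also be sketched in a self-contained form outside HotSpot. The following is an illustrative C++20 version (standard atomics and a counting semaphore stand in for Atomic, OrderAccess and Semaphore; all names are invented for the example, and default seq_cst orderings take the place of the explicit fence above), showing the polarity flip, the two exit counters, and the semaphore handshake that the numbered steps describe:

// Illustrative sketch only, not HotSpot code: the same single-writer
// synchronization protocol using C++20 primitives.  Readers bump _enter
// by 2 (leaving the polarity bit alone) and later bump the matching exit
// counter; the single writer flips the polarity and waits for the old
// exit counter to catch up with the old enter counter.
#include <atomic>
#include <cstdint>
#include <semaphore>

class SingleWriterSyncSketch {
  std::atomic<uint32_t> _enter{0};
  std::atomic<uint32_t> _exit[2] {};
  std::atomic<uint32_t> _waiting_for{0};
  std::counting_semaphore<> _wakeup{0};

public:
  // Reader: begin a critical section; the returned token selects the exit
  // counter via its low (polarity) bit.
  uint32_t enter() { return _enter.fetch_add(2) + 2; }

  // Reader: end a critical section; wake the writer if this exit completes
  // a pending synchronize() request.
  void exit(uint32_t token) {
    uint32_t exit_value = _exit[token & 1].fetch_add(2) + 2;
    if (exit_value == _waiting_for.load()) {
      _wakeup.release();
    }
  }

  // Writer (at most one at a time): wait for all critical sections entered
  // before this call to complete.
  void synchronize() {
    uint32_t old = _enter.load();
    for (;;) {
      // Initialize the new exit counter, then flip the polarity by adding
      // 1 to _enter.  Retries only race with readers adding 2, so the
      // polarity captured in 'old' stays valid across retries.
      _exit[(old + 1) & 1].store(old + 1);
      uint32_t expected = old;
      if (_enter.compare_exchange_strong(expected, old + 1)) break;
      old = expected;
    }
    // Publish the request, then wait for the old exit counter to reach the
    // old enter value.  seq_cst operations provide the store/load ordering
    // the original obtains with OrderAccess::fence().
    _waiting_for.store(old);
    while (_exit[old & 1].load() != old) {
      _wakeup.acquire();
    }
    // Drain stray wakeups so they cannot accumulate across requests.
    while (_wakeup.try_acquire()) {}
  }
};

In this form a reader wraps its access in enter()/exit(), while the single writer unpublishes shared state and then calls synchronize() before reclaiming it, which is the kind of deferred-reclamation handoff a single-writer synchronizer is typically used for.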