src/hotspot/share/utilities/singleWriterSynchronizer.cpp

old:

  27 #include "runtime/orderAccess.hpp"
  28 #include "runtime/os.hpp"
  29 #include "utilities/debug.hpp"
  30 #include "utilities/singleWriterSynchronizer.hpp"
  31 #include "utilities/macros.hpp"
  32 
  33 SingleWriterSynchronizer::SingleWriterSynchronizer() :
  34   _enter(0),
  35   _exit(),
  36   // The initial value of 1 for _waiting_for puts it on the inactive
  37   // track, so no thread exiting a critical section will match it.
  38   _waiting_for(1),
  39   _wakeup()
  40   DEBUG_ONLY(COMMA _writers(0))
  41 {}
  42 
  43 // Wait until all threads that entered a critical section before
  44 // synchronization have exited that critical section.
  45 void SingleWriterSynchronizer::synchronize() {
  46   // Side-effect in assert balanced by debug-only dec at end.
  47   assert(Atomic::add(1u, &_writers) == 1u, "multiple writers");
  48   // We don't know anything about the muxing between this invocation
  49   // and invocations in other threads.  We must start with the latest
  50   // _enter polarity, else we could clobber the wrong _exit value on
  51   // the first iteration.  So fence to ensure everything here follows
  52   // whatever muxing was used.
  53   OrderAccess::fence();
  54   uint value = _enter;
  55   // (1) Determine the old and new exit counters, based on the
  56   // polarity (bit0 value) of the on-entry enter counter.
  57   volatile uint* new_ptr = &_exit[(value + 1) & 1];
  58   // (2) Change the in-use exit counter to the new counter, by adding
  59   // 1 to the enter counter (flipping the polarity), meanwhile
  60   // "simultaneously" initializing the new exit counter to that enter
  61   // value.  Note: The new exit counter is not being used by read
  62   // operations until this change of _enter succeeds.
  63   uint old;
  64   do {
  65     old = value;
  66     *new_ptr = ++value;
  67     value = Atomic::cmpxchg(value, &_enter, old);
  68   } while (old != value);
  69   // Critical sections entered before we changed the polarity will use
  70   // the old exit counter.  Critical sections entered after the change
  71   // will use the new exit counter.
  72   volatile uint* old_ptr = &_exit[old & 1];
  73   assert(old_ptr != new_ptr, "invariant");
  74   // (3) Inform threads in in-progress critical sections that there is
  75   // a pending synchronize waiting.  The thread that completes the
  76   // request (_exit value == old) will signal the _wakeup semaphore to
  77   // allow us to proceed.
  78   _waiting_for = old;
  79   // Write of _waiting_for must precede read of _exit and associated
  80   // conditional semaphore wait.  If they were re-ordered then a
  81   // critical section exit could miss the wakeup request, failing to
  82   // signal us while we're waiting.
  83   OrderAccess::fence();
  84   // (4) Wait for all the critical sections started before the change
  85   // to complete, e.g. for the value of old_ptr to catch up with old.
  86   // Loop because there could be pending wakeups unrelated to this
  87   // synchronize request.
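
The comments above lean on the reader-side protocol: a critical section bumps _enter on entry and the polarity-matched _exit counter on exit. For context, here is a minimal sketch of that enter/exit pair, written against the same old-style Atomic argument order as the listing above. The reader side actually lives in the companion header (singleWriterSynchronizer.hpp), so treat this as an approximation of it, not a quotation.

  // Sketch only: reader-side counterpart of synchronize(), using the
  // fields shown above.  Readers add 2, so bit 0 of _enter (the
  // polarity bit) is owned exclusively by the writer.
  inline uint SingleWriterSynchronizer::enter() {
    return Atomic::add(2u, &_enter);
  }

  // Exit bumps the exit counter for the polarity captured at enter.
  // If that brings the counter up to a pending synchronize request,
  // signal the waiting writer.
  inline void SingleWriterSynchronizer::exit(uint enter_value) {
    uint exit_value = Atomic::add(2u, &_exit[enter_value & 1]);
    if (exit_value == _waiting_for) {
      _wakeup.signal();
    }
  }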

new:

  27 #include "runtime/orderAccess.hpp"
  28 #include "runtime/os.hpp"
  29 #include "utilities/debug.hpp"
  30 #include "utilities/singleWriterSynchronizer.hpp"
  31 #include "utilities/macros.hpp"
  32 
  33 SingleWriterSynchronizer::SingleWriterSynchronizer() :
  34   _enter(0),
  35   _exit(),
  36   // The initial value of 1 for _waiting_for puts it on the inactive
  37   // track, so no thread exiting a critical section will match it.
  38   _waiting_for(1),
  39   _wakeup()
  40   DEBUG_ONLY(COMMA _writers(0))
  41 {}
  42 
  43 // Wait until all threads that entered a critical section before
  44 // synchronization have exited that critical section.
  45 void SingleWriterSynchronizer::synchronize() {
  46   // Side-effect in assert balanced by debug-only dec at end.
  47   assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
  48   // We don't know anything about the muxing between this invocation
  49   // and invocations in other threads.  We must start with the latest
  50   // _enter polarity, else we could clobber the wrong _exit value on
  51   // the first iteration.  So fence to ensure everything here follows
  52   // whatever muxing was used.
  53   OrderAccess::fence();
  54   uint value = _enter;
  55   // (1) Determine the old and new exit counters, based on the
  56   // polarity (bit0 value) of the on-entry enter counter.
  57   volatile uint* new_ptr = &_exit[(value + 1) & 1];
  58   // (2) Change the in-use exit counter to the new counter, by adding
  59   // 1 to the enter counter (flipping the polarity), meanwhile
  60   // "simultaneously" initializing the new exit counter to that enter
  61   // value.  Note: The new exit counter is not being used by read
  62   // operations until this change of _enter succeeds.
  63   uint old;
  64   do {
  65     old = value;
  66     *new_ptr = ++value;
  67     value = Atomic::cmpxchg(&_enter, old, value);
  68   } while (old != value);
  69   // Critical sections entered before we changed the polarity will use
  70   // the old exit counter.  Critical sections entered after the change
  71   // will use the new exit counter.
  72   volatile uint* old_ptr = &_exit[old & 1];
  73   assert(old_ptr != new_ptr, "invariant");
  74   // (3) Inform threads in in-progress critical sections that there is
  75   // a pending synchronize waiting.  The thread that completes the
  76   // request (_exit value == old) will signal the _wakeup semaphore to
  77   // allow us to proceed.
  78   _waiting_for = old;
  79   // Write of _waiting_for must precede read of _exit and associated
  80   // conditional semaphore wait.  If they were re-ordered then a
  81   // critical section exit could miss the wakeup request, failing to
  82   // signal us while we're waiting.
  83   OrderAccess::fence();
  84   // (4) Wait for all the critical sections started before the change
  85   // to complete, e.g. for the value of old_ptr to catch up with old.
  86   // Loop because there could be pending wakeups unrelated to this
  87   // synchronize request.
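
Both copies of the excerpt cut off at step (4); the wait loop itself is below the fold. A sketch of how the function plausibly concludes, using the new pointer-first Atomic argument order to match the listing directly above (an approximation of the missing tail, not a quotation of it):

  // Sketch of step (4): wait until the old-polarity exit counter
  // catches up with the enter count captured at the flip.  The
  // acquire load pairs with the exit-side counter update; wakeups
  // from unrelated exits just trigger another check of the counter.
  while (old != OrderAccess::load_acquire(old_ptr)) {
    _wakeup.wait();
  }
  // Drain any stale semaphore signals left by unrelated exits so
  // they cannot accumulate across calls, then balance the debug-only
  // increment performed by the assert at function entry.
  while (_wakeup.trywait()) {}
  DEBUG_ONLY(Atomic::dec(&_writers);)
}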