// NOTE(review): interior of a writer-side synchronize() — the enclosing
// function signature (above) and the semaphore wait loop (below) are outside
// this view. Single-writer protocol: the assert below enforces that at most
// one thread executes this path at a time.
47 assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
48 // We don't know anything about the muxing between this invocation
49 // and invocations in other threads. We must start with the latest
50 // _enter polarity, else we could clobber the wrong _exit value on
51 // the first iteration. So fence to ensure everything here follows
52 // whatever muxing was used.
53 OrderAccess::fence();
54 uint value = _enter;
55 // (1) Determine the old and new exit counters, based on the
56 // polarity (bit0 value) of the on-entry enter counter.
57 volatile uint* new_ptr = &_exit[(value + 1) & 1];
58 // (2) Change the in-use exit counter to the new counter, by adding
59 // 1 to the enter counter (flipping the polarity), meanwhile
60 // "simultaneously" initializing the new exit counter to that enter
61 // value. Note: The new exit counter is not being used by read
62 // operations until this change of _enter succeeds.
63 uint old;
64 do {
65 old = value;
66 *new_ptr = ++value;
// NOTE(review): this call uses the legacy HotSpot Atomic::cmpxchg argument
// order (exchange_value, dest, compare_value) — confirm against the Atomic
// declaration in scope; the sibling copy of this fragment later in the file
// uses the harmonized (dest, compare_value, exchange_value) order.
// cmpxchg returns the value observed in _enter: equal to "old" means the
// polarity flip succeeded; otherwise retry from the observed value.
67 value = Atomic::cmpxchg(value, &_enter, old);
68 } while (old != value);
69 // Critical sections entered before we changed the polarity will use
70 // the old exit counter. Critical sections entered after the change
71 // will use the new exit counter.
72 volatile uint* old_ptr = &_exit[old & 1];
73 assert(old_ptr != new_ptr, "invariant");
74 // (3) Inform threads in in-progress critical sections that there is
75 // a pending synchronize waiting. The thread that completes the
76 // request (_exit value == old) will signal the _wakeup semaphore to
77 // allow us to proceed.
78 _waiting_for = old;
79 // Write of _waiting_for must precede read of _exit and associated
80 // conditional semaphore wait. If they were re-ordered then a
81 // critical section exit could miss the wakeup request, failing to
82 // signal us while we're waiting.
83 OrderAccess::fence();
84 // (4) Wait for all the critical sections started before the change
85 // to complete, e.g. for the value of old_ptr to catch up with old.
86 // Loop because there could be pending wakeups unrelated to this
87 // synchronize request.
|
// NOTE(review): interior of a writer-side synchronize() — the enclosing
// function signature (above) and the semaphore wait loop (below) are outside
// this view. Single-writer protocol: the assert below enforces that at most
// one thread executes this path at a time.
47 assert(Atomic::add(&_writers, 1u) == 1u, "multiple writers");
48 // We don't know anything about the muxing between this invocation
49 // and invocations in other threads. We must start with the latest
50 // _enter polarity, else we could clobber the wrong _exit value on
51 // the first iteration. So fence to ensure everything here follows
52 // whatever muxing was used.
53 OrderAccess::fence();
54 uint value = _enter;
55 // (1) Determine the old and new exit counters, based on the
56 // polarity (bit0 value) of the on-entry enter counter.
57 volatile uint* new_ptr = &_exit[(value + 1) & 1];
58 // (2) Change the in-use exit counter to the new counter, by adding
59 // 1 to the enter counter (flipping the polarity), meanwhile
60 // "simultaneously" initializing the new exit counter to that enter
61 // value. Note: The new exit counter is not being used by read
62 // operations until this change of _enter succeeds.
63 uint old;
64 do {
65 old = value;
66 *new_ptr = ++value;
// NOTE(review): this call uses the harmonized HotSpot Atomic::cmpxchg
// argument order (dest, compare_value, exchange_value) — confirm against the
// Atomic declaration in scope; the earlier copy of this fragment in the file
// uses the legacy (exchange_value, dest, compare_value) order.
// cmpxchg returns the value observed in _enter: equal to "old" means the
// polarity flip succeeded; otherwise retry from the observed value.
67 value = Atomic::cmpxchg(&_enter, old, value);
68 } while (old != value);
69 // Critical sections entered before we changed the polarity will use
70 // the old exit counter. Critical sections entered after the change
71 // will use the new exit counter.
72 volatile uint* old_ptr = &_exit[old & 1];
73 assert(old_ptr != new_ptr, "invariant");
74 // (3) Inform threads in in-progress critical sections that there is
75 // a pending synchronize waiting. The thread that completes the
76 // request (_exit value == old) will signal the _wakeup semaphore to
77 // allow us to proceed.
78 _waiting_for = old;
79 // Write of _waiting_for must precede read of _exit and associated
80 // conditional semaphore wait. If they were re-ordered then a
81 // critical section exit could miss the wakeup request, failing to
82 // signal us while we're waiting.
83 OrderAccess::fence();
84 // (4) Wait for all the critical sections started before the change
85 // to complete, e.g. for the value of old_ptr to catch up with old.
86 // Loop because there could be pending wakeups unrelated to this
87 // synchronize request.
|