< prev index next >

src/hotspot/share/gc/shared/taskqueue.inline.hpp

Print this page
rev 55920 : 8229169: False failure of GenericTaskQueue::pop_local on architectures with weak memory model
Reviewed-by: mdoerr, kbarrett


 167   // before the store just above.
 168   OrderAccess::fence();
 169   // g++ complains if the volatile result of the assignment is
 170   // unused, so we cast the volatile away.  We cannot cast directly
 171   // to void, because gcc treats that as not using the result of the
 172   // assignment.  However, casting to E& means that we trigger an
 173   // unused-value warning.  So, we cast the E& to void.
 174   (void) const_cast<E&>(t = _elems[localBot]);
 175   // This is a second read of "age"; the "size()" above is the first.
 176   // If there's still at least one element in the queue, based on the
 177   // "_bottom" and "age" we've read, then there can be no interference with
 178   // a "pop_global" operation, and we're done.
 179   idx_t tp = _age.top();    // XXX
 180   if (size(localBot, tp) > 0) {
 181     assert(dirty_size(localBot, tp) != N - 1, "sanity");
 182     TASKQUEUE_STATS_ONLY(stats.record_pop());
 183     return true;
 184   } else {
 185     // Otherwise, the queue contained exactly one element; we take the slow
 186     // path.





 187     return pop_local_slow(localBot, _age.get());
 188   }
 189 }
 190 
 191 template <class E, MEMFLAGS F, unsigned int N>
 192 bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
 193 {
       // Pop one element from the overflow stack into t.
       // Returns false (leaving t untouched) when the overflow stack is empty,
       // true after storing the popped element into t.
       // NOTE(review): no synchronization is visible here — presumably only the
       // queue's owning thread touches the overflow stack; confirm with callers.
 194   if (overflow_empty()) return false;
 195   t = overflow_stack()->pop();
 196   return true;
 197 }
 198 
 199 template<class E, MEMFLAGS F, unsigned int N>
 200 bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
 201   Age oldAge = _age.get();
 202   // Architectures with weak memory model require a barrier here
 203   // to guarantee that bottom is not older than age,
 204   // which is crucial for the correctness of the algorithm.
 205 #if !(defined SPARC || defined IA32 || defined AMD64)
 206   OrderAccess::fence();




 167   // before the store just above.
 168   OrderAccess::fence();
 169   // g++ complains if the volatile result of the assignment is
 170   // unused, so we cast the volatile away.  We cannot cast directly
 171   // to void, because gcc treats that as not using the result of the
 172   // assignment.  However, casting to E& means that we trigger an
 173   // unused-value warning.  So, we cast the E& to void.
 174   (void) const_cast<E&>(t = _elems[localBot]);
 175   // This is a second read of "age"; the "size()" above is the first.
 176   // If there's still at least one element in the queue, based on the
 177   // "_bottom" and "age" we've read, then there can be no interference with
 178   // a "pop_global" operation, and we're done.
 179   idx_t tp = _age.top();    // XXX
 180   if (size(localBot, tp) > 0) {
 181     assert(dirty_size(localBot, tp) != N - 1, "sanity");
 182     TASKQUEUE_STATS_ONLY(stats.record_pop());
 183     return true;
 184   } else {
 185     // Otherwise, the queue contained exactly one element; we take the slow
 186     // path.
 187 
 188     // The barrier is required to prevent reordering the two reads of _age:
 189     // one is the _age.get() below, and the other is _age.top() above the if-stmt.
 190     // The algorithm may fail if _age.get() reads an older value than _age.top().
 191     OrderAccess::loadload();
 192     return pop_local_slow(localBot, _age.get());
 193   }
 194 }
 195 
 196 template <class E, MEMFLAGS F, unsigned int N>
 197 bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
 198 {
       // Pop one element from the overflow stack into t.
       // Returns false (leaving t untouched) when the overflow stack is empty,
       // true after storing the popped element into t.
       // NOTE(review): no synchronization is visible here — presumably only the
       // queue's owning thread touches the overflow stack; confirm with callers.
 199   if (overflow_empty()) return false;
 200   t = overflow_stack()->pop();
 201   return true;
 202 }
 203 
 204 template<class E, MEMFLAGS F, unsigned int N>
 205 bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
 206   Age oldAge = _age.get();
 207   // Architectures with weak memory model require a barrier here
 208   // to guarantee that bottom is not older than age,
 209   // which is crucial for the correctness of the algorithm.
 210 #if !(defined SPARC || defined IA32 || defined AMD64)
 211   OrderAccess::fence();


< prev index next >