
src/hotspot/share/gc/shared/ptrQueue.cpp

Old:

 180   // the allocation rate and the release rate are going to be fairly
 181   // similar, due to how the buffers are used.
 182   const size_t trigger_transfer = 10;
 183 
 184   // Add to pending list. Update count first so no underflow in transfer.
 185   size_t pending_count = Atomic::add(&_pending_count, 1u);
 186   _pending_list.push(*node);
 187   if (pending_count > trigger_transfer) {
 188     try_transfer_pending();
 189   }
 190 }
 191 
 192 // Try to transfer nodes from _pending_list to _free_list, with a
 193 // synchronization delay for any in-progress pops from the _free_list,
 194 // to solve ABA there.  Return true if performed a (possibly empty)
 195 // transfer, false if blocked from doing so by some other thread's
 196 // in-progress transfer.
 197 bool BufferNode::Allocator::try_transfer_pending() {
 198   // Attempt to claim the lock.
 199   if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
 200       Atomic::cmpxchg(true, &_transfer_lock, false)) {
 201     return false;
 202   }
 203   // Have the lock; perform the transfer.
 204 
 205   // Claim all the pending nodes.
 206   BufferNode* first = _pending_list.pop_all();
 207   if (first != NULL) {
 208     // Prepare to add the claimed nodes, and update _pending_count.
 209     BufferNode* last = first;
 210     size_t count = 1;
 211     for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
 212       last = next;
 213       ++count;
 214     }
 215     Atomic::sub(&_pending_count, count);
 216 
 217     // Wait for any in-progress pops, to avoid ABA for them.
 218     GlobalCounter::write_synchronize();
 219 
 220     // Add synchronized nodes to _free_list.

New:

 180   // the allocation rate and the release rate are going to be fairly
 181   // similar, due to how the buffers are used.
 182   const size_t trigger_transfer = 10;
 183 
 184   // Add to pending list. Update count first so no underflow in transfer.
 185   size_t pending_count = Atomic::add(&_pending_count, 1u);
 186   _pending_list.push(*node);
 187   if (pending_count > trigger_transfer) {
 188     try_transfer_pending();
 189   }
 190 }
 191 
 192 // Try to transfer nodes from _pending_list to _free_list, with a
 193 // synchronization delay for any in-progress pops from the _free_list,
 194 // to solve ABA there.  Return true if performed a (possibly empty)
 195 // transfer, false if blocked from doing so by some other thread's
 196 // in-progress transfer.
 197 bool BufferNode::Allocator::try_transfer_pending() {
 198   // Attempt to claim the lock.
 199   if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
 200       Atomic::cmpxchg(&_transfer_lock, false, true)) {
 201     return false;
 202   }
 203   // Have the lock; perform the transfer.
 204 
 205   // Claim all the pending nodes.
 206   BufferNode* first = _pending_list.pop_all();
 207   if (first != NULL) {
 208     // Prepare to add the claimed nodes, and update _pending_count.
 209     BufferNode* last = first;
 210     size_t count = 1;
 211     for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
 212       last = next;
 213       ++count;
 214     }
 215     Atomic::sub(&_pending_count, count);
 216 
 217     // Wait for any in-progress pops, to avoid ABA for them.
 218     GlobalCounter::write_synchronize();
 219 
 220     // Add synchronized nodes to _free_list.
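
The change under review only reorders the arguments of Atomic::cmpxchg: the old form took (exchange_value, dest, compare_value), the new form takes (dest, compare_value, exchange_value). In both forms the call returns the previous value of the destination, so the condition at line 199-200 is false exactly when this thread installed true, i.e. claimed the transfer lock, and the early return is skipped. As a rough, self-contained analogue of the same try-lock idiom (an editor's sketch with hypothetical names, not HotSpot code), using std::atomic:

    #include <atomic>

    static std::atomic<bool> transfer_lock{false};

    // Returns true if this thread claimed the lock. This mirrors the sense of
    // !Atomic::cmpxchg(&_transfer_lock, false, true) in the new code, because
    // HotSpot's cmpxchg returns the value previously held by the destination.
    static bool try_claim_transfer_lock() {
      bool expected = false;  // succeed only if the lock is currently free
      return transfer_lock.compare_exchange_strong(expected, true);
    }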


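On the transfer path itself: _pending_list.pop_all() detaches the whole pending chain, the loop counts the detached nodes, and Atomic::sub(&_pending_count, count) relies on release() having incremented _pending_count before pushing each node (the "no underflow" comment at line 184). GlobalCounter::write_synchronize() then makes the transferring thread wait until every pop from _free_list that was already in progress has completed, so recycled nodes can be re-linked into _free_list without an in-flight pop seeing them reappear (the ABA hazard named in the block comment). The real GlobalCounter is epoch-based with per-thread counters; the following is only a crude, self-contained stand-in (an editor's sketch, hypothetical names) that uses a single reader count to illustrate the wait-for-in-progress-readers idea:

    #include <atomic>
    #include <thread>

    // Reader side: a pop from the free list holds a PopGuard while it runs.
    static std::atomic<int> active_pops{0};

    struct PopGuard {
      PopGuard()  { active_pops.fetch_add(1, std::memory_order_acquire); }
      ~PopGuard() { active_pops.fetch_sub(1, std::memory_order_release); }
    };

    // Writer side, standing in for GlobalCounter::write_synchronize(): spin
    // until every pop that was in flight has drained; only then hand the
    // recycled nodes to the free list.
    static void wait_for_in_progress_pops() {
      while (active_pops.load(std::memory_order_acquire) != 0) {
        std::this_thread::yield();
      }
    }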