src/hotspot/share/gc/shared/ptrQueue.cpp

 207   if (first != NULL) {
 208     // Prepare to add the claimed nodes, and update _pending_count.
 209     BufferNode* last = first;
 210     size_t count = 1;
 211     for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
 212       last = next;
 213       ++count;
 214     }
 215     Atomic::sub(count, &_pending_count);
 216 
 217     // Wait for any in-progress pops, to avoid ABA for them.
 218     GlobalCounter::write_synchronize();
 219 
 220     // Add synchronized nodes to _free_list.
 221     // Update count first so no underflow in allocate().
 222     Atomic::add(count, &_free_count);
 223     _free_list.prepend(*first, *last);
 224     log_trace(gc, ptrqueue, freelist)
 225              ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
 226   }
 227   OrderAccess::release_store(&_transfer_lock, false);
 228   return true;
 229 }
 230 
 231 size_t BufferNode::Allocator::reduce_free_list(size_t remove_goal) {
 232   try_transfer_pending();
 233   size_t removed = 0;
 234   for ( ; removed < remove_goal; ++removed) {
 235     BufferNode* node = _free_list.pop();
 236     if (node == NULL) break;
 237     BufferNode::deallocate(node);
 238   }
 239   size_t new_count = Atomic::sub(removed, &_free_count);
 240   log_debug(gc, ptrqueue, freelist)
 241            ("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
 242             name(), removed, new_count);
 243   return removed;
 244 }
 245 
 246 PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
 247   _allocator(allocator),


 207   if (first != NULL) {
 208     // Prepare to add the claimed nodes, and update _pending_count.
 209     BufferNode* last = first;
 210     size_t count = 1;
 211     for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
 212       last = next;
 213       ++count;
 214     }
 215     Atomic::sub(count, &_pending_count);
 216 
 217     // Wait for any in-progress pops, to avoid ABA for them.
 218     GlobalCounter::write_synchronize();
 219 
 220     // Add synchronized nodes to _free_list.
 221     // Update count first so no underflow in allocate().
 222     Atomic::add(count, &_free_count);
 223     _free_list.prepend(*first, *last);
 224     log_trace(gc, ptrqueue, freelist)
 225              ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
 226   }
 227   Atomic::release_store(&_transfer_lock, false);
 228   return true;
 229 }
 230 
 231 size_t BufferNode::Allocator::reduce_free_list(size_t remove_goal) {
 232   try_transfer_pending();
 233   size_t removed = 0;
 234   for ( ; removed < remove_goal; ++removed) {
 235     BufferNode* node = _free_list.pop();
 236     if (node == NULL) break;
 237     BufferNode::deallocate(node);
 238   }
 239   size_t new_count = Atomic::sub(removed, &_free_count);
 240   log_debug(gc, ptrqueue, freelist)
 241            ("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
 242             name(), removed, new_count);
 243   return removed;
 244 }
 245 
 246 PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
 247   _allocator(allocator),
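
The only change in this hunk is mechanical: the release store on _transfer_lock now goes through Atomic::release_store instead of OrderAccess::release_store; the memory-ordering semantics are unchanged. As a rough illustration of the try-lock pattern this flag implements, here is a minimal standalone sketch using C++ std::atomic rather than HotSpot's Atomic class (the function and variable names below are hypothetical, not HotSpot code):

  // Sketch: claim the flag with a CAS, publish work, then clear the flag
  // with a release store so the published work is visible to any thread
  // that later observes the flag as false.
  #include <atomic>
  #include <cstddef>

  static std::atomic<bool> transfer_lock{false};
  static std::atomic<size_t> free_count{0};

  bool try_transfer_pending_sketch(size_t claimed) {
    bool expected = false;
    // Only one thread at a time performs the transfer.
    if (!transfer_lock.compare_exchange_strong(expected, true,
                                               std::memory_order_acquire)) {
      return false;  // another thread is already transferring
    }
    // ... move the claimed nodes onto the free list here ...
    free_count.fetch_add(claimed, std::memory_order_relaxed);
    // Release store: the writes above happen-before a later acquire load
    // of transfer_lock that sees false.
    transfer_lock.store(false, std::memory_order_release);
    return true;
  }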