}

BufferNode::Allocator::~Allocator() {
  delete_list(_free_list.pop_all());
  delete_list(_pending_list.pop_all());
}

void BufferNode::Allocator::delete_list(BufferNode* list) {
  while (list != NULL) {
    BufferNode* next = list->next();
    DEBUG_ONLY(list->set_next(NULL);)
    BufferNode::deallocate(list);
    list = next;
  }
}

size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}

const size_t size_zero = 0;
const size_t size_one = 1;

BufferNode* BufferNode::Allocator::allocate() {
  BufferNode* node;
  {
    // Protect against ABA; see release().
    GlobalCounter::CriticalSection cs(Thread::current());
    node = _free_list.pop();
  }
  if (node == NULL) {
    node = BufferNode::allocate(_buffer_size);
  } else {
    // Decrement count after getting buffer from free list. This, along
    // with incrementing count before adding to free list, ensures count
    // never underflows.
    size_t count = Atomic::sub(size_one, &_free_count);
    assert((count + 1) != 0, "_free_count underflow");
  }
  return node;
}
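
// Illustrative usage sketch, not code from this file: a typical client pairs
// allocate() with a later release() of the same node. The "allocator" pointer
// below is a hypothetical stand-in for whatever object owns the
// BufferNode::Allocator instance.
//
//   BufferNode* node = allocator->allocate();
//   void** buffer = BufferNode::make_buffer_from_node(node);
//   // ... fill the buffer ...
//   allocator->release(node);   // requires node->next() == NULL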

// To solve the ABA problem for lock-free stack pop, allocate does the
// pop inside a critical section, and release synchronizes on the
// critical sections before adding to the _free_list. But we don't
// want to make every release have to do a synchronize. Instead, we
// initially place released nodes on the _pending_list, and transfer
// them to the _free_list in batches. Only one transfer at a time is
// permitted, with a lock bit to control access to that phase. A
// transfer takes all the nodes from the _pending_list, synchronizes on
// the _free_list pops, and then adds the former pending nodes to the
// _free_list. While that's happening, other threads might be adding
// other nodes to the _pending_list, to be dealt with by some later
// transfer.
void BufferNode::Allocator::release(BufferNode* node) {
  assert(node != NULL, "precondition");
  assert(node->next() == NULL, "precondition");

  // Desired minimum transfer batch size. There is relatively little
  // importance to the specific number. It shouldn't be too big, else
  // we're wasting space when the release rate is low. If the release
  // rate is high, we might accumulate more than this before being
  // able to start a new transfer, but that's okay. Also note that
  // the allocation rate and the release rate are going to be fairly
  // similar, due to how the buffers are used.
  const size_t trigger_transfer = 10;

  // Add to pending list. Update count first so no underflow in transfer.
  size_t pending_count = Atomic::add(size_one, &_pending_count);
  _pending_list.push(*node);
  if (pending_count > trigger_transfer) {
    try_transfer_pending();
  }
}
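
// Condensed, illustrative sketch of the scheme described in the block comment
// preceding release(); this is not a quote of the transfer code. The use of
// GlobalCounter::write_synchronize() is an assumption inferred from the
// GlobalCounter::CriticalSection in allocate().
//
//   // Popping thread (allocate):
//   {
//     GlobalCounter::CriticalSection cs(Thread::current());
//     node = _free_list.pop();          // node can't be recycled while cs is live
//   }
//
//   // Transferring thread (at most one, guarded by _transfer_lock):
//   BufferNode* pending = _pending_list.pop_all();
//   GlobalCounter::write_synchronize(); // wait for in-progress pops to finish
//   // ... only now add the former pending nodes to _free_list ...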

// Try to transfer nodes from _pending_list to _free_list, with a
// synchronization delay for any in-progress pops from the _free_list,
// to solve ABA there. Return true if performed a (possibly empty)
// transfer, false if blocked from doing so by some other thread's
// in-progress transfer.
bool BufferNode::Allocator::try_transfer_pending() {
  // Attempt to claim the lock.
  if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
      Atomic::cmpxchg(true, &_transfer_lock, false)) {
    return false;
  }
  // Have the lock; perform the transfer.

  // Claim all the pending nodes.