1 /*
2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/ptrQueue.hpp"
27 #include "memory/allocation.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "runtime/atomic.hpp"
30 #include "runtime/mutex.hpp"
31 #include "runtime/mutexLocker.hpp"
32 #include "runtime/thread.inline.hpp"
33
34 #include <new>
35
// Construct a queue attached to "qset".  No buffer is allocated here:
// _buf starts NULL and _capacity_in_bytes is 0; storage is obtained later.
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),      // Permanent queues need not be flushed before delete (see ~PtrQueue).
  _index(0),
  _capacity_in_bytes(0),      // No buffer yet => zero capacity.
  _buf(NULL),
  _lock(NULL)
{}
45
// A non-permanent queue must have had its buffer flushed (handed back)
// before destruction; only permanent queues may still hold a buffer here.
PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}
49
50 void PtrQueue::flush_impl() {
51 if (_buf != NULL) {
52 BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
68 }
69
70 assert(_buf != NULL, "postcondition");
71 assert(index() > 0, "postcondition");
72 assert(index() <= capacity(), "invariant");
73 _index -= _element_size;
74 _buf[index()] = ptr;
75 }
76
77 BufferNode* BufferNode::allocate(size_t size) {
78 size_t byte_size = size * sizeof(void*);
79 void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
80 return new (data) BufferNode;
81 }
82
// Destroy the node and release the combined node+buffer allocation that
// was obtained from BufferNode::allocate().
void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}
87
// Allocator for nodes with buffers of buffer_size pointer slots.  The
// free list is protected by "lock", which must be non-NULL; it is taken
// without safepoint checks (see allocate/release/reduce_free_list).
BufferNode::Allocator::Allocator(size_t buffer_size, Mutex* lock) :
  _buffer_size(buffer_size),
  _lock(lock),
  _free_list(NULL),
  _free_count(0)
{
  assert(lock != NULL, "precondition");
}
96
97 BufferNode::Allocator::~Allocator() {
98 while (_free_list != NULL) {
99 BufferNode* node = _free_list;
100 _free_list = node->next();
101 BufferNode::deallocate(node);
102 }
103 }
104
// Number of nodes currently on the free list.  Atomic load so callers
// can sample the count without taking _lock; may be stale when used.
size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}
108
109 BufferNode* BufferNode::Allocator::allocate() {
110 BufferNode* node = NULL;
111 {
112 MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
113 node = _free_list;
114 if (node != NULL) {
115 _free_list = node->next();
116 --_free_count;
117 node->set_next(NULL);
118 node->set_index(0);
119 return node;
120 }
121 }
122 return BufferNode::allocate(_buffer_size);
123 }
124
// Return a node to the free list, pushing it at the head.  Serialized
// with allocate() and reduce_free_list() via _lock.
void BufferNode::Allocator::release(BufferNode* node) {
  MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_free_list);
  _free_list = node;
  ++_free_count;
}
131
// Trim the free list to bound memory retained by the allocator.  The
// nodes to discard are detached from the list while holding _lock; the
// actual deallocation is performed after the lock is dropped.
void BufferNode::Allocator::reduce_free_list() {
  BufferNode* head = NULL;
  {
    MutexLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    // For now, delete half.
    size_t remove = _free_count / 2;
    if (remove > 0) {
      // Walk "remove" nodes from the head; prev ends on the last node to
      // discard, tail on the first node to keep.
      head = _free_list;
      BufferNode* tail = head;
      BufferNode* prev = NULL;
      for (size_t i = 0; i < remove; ++i) {
        assert(tail != NULL, "free list size is wrong");
        prev = tail;
        tail = tail->next();
      }
      assert(prev != NULL, "invariant");
      assert(prev->next() == tail, "invariant");
      // Detach the prefix [head .. prev] and keep the rest.
      prev->set_next(NULL);
      _free_list = tail;
      _free_count -= remove;
    }
  }
  // Deallocate the detached nodes outside the lock.
  while (head != NULL) {
    BufferNode* next = head->next();
    BufferNode::deallocate(head);
    head = next;
  }
}
160
// Construct an empty, inactive queue set.  The allocator and monitor
// start NULL here; presumably they are installed later, before the set
// is used — TODO confirm against derived-class initialization.
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _allocator(NULL),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
  _process_completed_buffers(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_buffers(MaxCompletedBuffersUnlimited),   // Unlimited by default.
  _completed_buffers_padding(0),
  _all_active(false)
{}
174
175 PtrQueueSet::~PtrQueueSet() {
176 // There are presently only a couple (derived) instances ever
177 // created, and they are permanent, so no harm currently done by
178 // doing nothing here.
|
1 /*
2 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/shared/ptrQueue.hpp"
27 #include "logging/log.hpp"
28 #include "memory/allocation.hpp"
29 #include "memory/allocation.inline.hpp"
30 #include "runtime/atomic.hpp"
31 #include "runtime/mutex.hpp"
32 #include "runtime/mutexLocker.hpp"
33 #include "runtime/orderAccess.hpp"
34 #include "runtime/thread.inline.hpp"
35 #include "utilities/globalCounter.inline.hpp"
36
37 #include <new>
38
// Construct a queue attached to "qset".  No buffer is allocated here:
// _buf starts NULL and _capacity_in_bytes is 0; storage is obtained later.
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset),
  _active(active),
  _permanent(permanent),      // Permanent queues need not be flushed before delete (see ~PtrQueue).
  _index(0),
  _capacity_in_bytes(0),      // No buffer yet => zero capacity.
  _buf(NULL),
  _lock(NULL)
{}
48
// A non-permanent queue must have had its buffer flushed (handed back)
// before destruction; only permanent queues may still hold a buffer here.
PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}
52
53 void PtrQueue::flush_impl() {
54 if (_buf != NULL) {
55 BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
71 }
72
73 assert(_buf != NULL, "postcondition");
74 assert(index() > 0, "postcondition");
75 assert(index() <= capacity(), "invariant");
76 _index -= _element_size;
77 _buf[index()] = ptr;
78 }
79
80 BufferNode* BufferNode::allocate(size_t size) {
81 size_t byte_size = size * sizeof(void*);
82 void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
83 return new (data) BufferNode;
84 }
85
// Destroy the node and release the combined node+buffer allocation that
// was obtained from BufferNode::allocate().
void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}
90
// Lock-free allocator for nodes with buffers of buffer_size pointer
// slots.  "name" is copied (truncated to fit _name) and used in log
// messages (see try_transfer_pending / reduce_free_list).
BufferNode::Allocator::Allocator(const char* name, size_t buffer_size) :
  _buffer_size(buffer_size),
  _pending_list(),
  _free_list(),
  _pending_count(0),
  _free_count(0),
  _transfer_lock(false)
{
  strncpy(_name, name, sizeof(_name));
  // strncpy does not NUL-terminate when the source fills the buffer;
  // force termination.
  _name[sizeof(_name) - 1] = '\0';
}
102
// Free all nodes, both those available for reuse and those still on the
// pending list.  No synchronization: destruction must not race with use.
BufferNode::Allocator::~Allocator() {
  delete_list(_free_list.pop_all());
  delete_list(_pending_list.pop_all());
}
107
108 void BufferNode::Allocator::delete_list(BufferNode* list) {
109 while (list != NULL) {
110 BufferNode* next = list->next();
111 DEBUG_ONLY(list->set_next(NULL);)
112 BufferNode::deallocate(list);
113 list = next;
114 }
115 }
116
// Number of nodes currently on the free list.  Atomic load; the value
// may be stale by the time the caller acts on it.
size_t BufferNode::Allocator::free_count() const {
  return Atomic::load(&_free_count);
}
120
// Obtain a node: try the lock-free free list first, falling back to a
// fresh C-heap allocation when the list is empty.
BufferNode* BufferNode::Allocator::allocate() {
  BufferNode* node;
  {
    // Protect against ABA; see release().
    GlobalCounter::CriticalSection cs(Thread::current());
    node = _free_list.pop();
  }
  if (node == NULL) {
    node = BufferNode::allocate(_buffer_size);
  } else {
    // Decrement count after getting buffer from free list. This, along
    // with incrementing count before adding to free list, ensures count
    // never underflows.
    size_t count = Atomic::sub(1u, &_free_count);
    assert((count + 1) != 0, "_free_count underflow");
  }
  return node;
}
139
// To solve the ABA problem for lock-free stack pop, allocate does the
// pop inside a critical section, and release synchronizes on the
// critical sections before adding to the _free_list. But we don't
// want to make every release have to do a synchronize. Instead, we
// initially place released nodes on the _pending_list, and transfer
// them to the _free_list in batches. Only one transfer at a time is
// permitted, with a lock bit to control access to that phase. A
// transfer takes all the nodes from the _pending_list, synchronizes on
// the _free_list pops, and then adds the former pending nodes to the
// _free_list. While that's happening, other threads might be adding
// other nodes to the _pending_list, to be dealt with by some later
// transfer.
//
// Return an unlinked node to the allocator by pushing it on the pending
// list, and opportunistically kick off a pending->free transfer once
// enough nodes have accumulated.
void BufferNode::Allocator::release(BufferNode* node) {
  assert(node != NULL, "precondition");
  assert(node->next() == NULL, "precondition");

  // Desired minimum transfer batch size. There is relatively little
  // importance to the specific number. It shouldn't be too big, else
  // we're wasting space when the release rate is low. If the release
  // rate is high, we might accumulate more than this before being
  // able to start a new transfer, but that's okay. Also note that
  // the allocation rate and the release rate are going to be fairly
  // similar, due to how the buffers are used.
  const size_t trigger_transfer = 10;

  // Add to pending list. Update count first so no underflow in transfer.
  size_t pending_count = Atomic::add(1u, &_pending_count);
  _pending_list.push(*node);
  if (pending_count > trigger_transfer) {
    // Best effort; failure just means some other thread is transferring.
    try_transfer_pending();
  }
}
172
// Try to transfer nodes from _pending_list to _free_list, with a
// synchronization delay for any in-progress pops from the _free_list,
// to solve ABA there. Return true if performed a (possibly empty)
// transfer, false if blocked from doing so by some other thread's
// in-progress transfer.
bool BufferNode::Allocator::try_transfer_pending() {
  // Attempt to claim the lock.
  if (Atomic::load(&_transfer_lock) || // Skip CAS if likely to fail.
      Atomic::cmpxchg(true, &_transfer_lock, false)) {
    return false;
  }
  // Have the lock; perform the transfer.

  // Claim all the pending nodes.
  BufferNode* first = _pending_list.pop_all();
  if (first != NULL) {
    // Prepare to add the claimed nodes, and update _pending_count.
    // Walk the detached chain once to find its tail and length.
    BufferNode* last = first;
    size_t count = 1;
    for (BufferNode* next = first->next(); next != NULL; next = next->next()) {
      last = next;
      ++count;
    }
    Atomic::sub(count, &_pending_count);

    // Wait for any in-progress pops, to avoid ABA for them.
    GlobalCounter::write_synchronize();

    // Add synchronized nodes to _free_list.
    // Update count first so no underflow in allocate().
    Atomic::add(count, &_free_count);
    _free_list.prepend(*first, *last);
    log_trace(gc, ptrqueue, freelist)
             ("Transferred %s pending to free: " SIZE_FORMAT, name(), count);
  }
  // Drop the transfer lock; release semantics publish the list updates.
  OrderAccess::release_store(&_transfer_lock, false);
  return true;
}
211
212 size_t BufferNode::Allocator::reduce_free_list(size_t remove_goal) {
213 try_transfer_pending();
214 size_t removed = 0;
215 for ( ; removed < remove_goal; ++removed) {
216 BufferNode* node = _free_list.pop();
217 if (node == NULL) break;
218 BufferNode::deallocate(node);
219 }
220 size_t new_count = Atomic::sub(removed, &_free_count);
221 log_debug(gc, ptrqueue, freelist)
222 ("Reduced %s free list by " SIZE_FORMAT " to " SIZE_FORMAT,
223 name(), removed, new_count);
224 return removed;
225 }
226
// Construct an empty, inactive queue set.  The allocator and monitor
// start NULL here; presumably they are installed later, before the set
// is used — TODO confirm against derived-class initialization.
PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _allocator(NULL),
  _cbl_mon(NULL),
  _completed_buffers_head(NULL),
  _completed_buffers_tail(NULL),
  _n_completed_buffers(0),
  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
  _process_completed_buffers(false),
  _notify_when_complete(notify_when_complete),
  _max_completed_buffers(MaxCompletedBuffersUnlimited),   // Unlimited by default.
  _completed_buffers_padding(0),
  _all_active(false)
{}
240
241 PtrQueueSet::~PtrQueueSet() {
242 // There are presently only a couple (derived) instances ever
243 // created, and they are permanent, so no harm currently done by
244 // doing nothing here.
|