#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
  _permanent(permanent), _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

void PtrQueue::flush_impl() {
  if (!_permanent && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      size_t limit = byte_index_to_index(_index);
      for (size_t i = 0; i < limit; ++i) {
        _buf[i] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}


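// Buffers fill from the end toward the beginning: _index is a byte offset
// into the buffer that starts at _sz (the capacity in bytes) and counts down
// to 0 as entries are added, so _index == 0 means the buffer is full and
// _index == _sz means it is empty.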
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(_index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= sizeof(void*);
  _buf[byte_index_to_index(_index)] = ptr;
  assert(_index <= _sz, "Invariant.");
}

void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock only because the caller will unlock, as it does in the
  // normal case.
  _lock->lock_without_safepoint_check();
}


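// A BufferNode and the buffer it describes share a single C-heap block: the
// node header is constructed at the start of the block with placement new,
// and the buffer's storage begins buffer_offset() bytes in.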
BufferNode* BufferNode::allocate(size_t byte_size) {
  assert(byte_size > 0, "precondition");
  assert(is_size_aligned(byte_size, sizeof(void**)),
         "Invalid buffer size " SIZE_FORMAT, byte_size);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  // ... (initializer list and intervening definitions elided) ...

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  BufferNode* node = NULL;
  {
    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
    node = _fl_owner->_buf_free_list;
    if (node != NULL) {
      _fl_owner->_buf_free_list = node->next();
      _fl_owner->_buf_free_list_sz--;
    }
  }
  if (node == NULL) {
    node = BufferNode::allocate(_sz);
  } else {
    // Reinitialize buffer obtained from free list.
    node->set_index(0);
    node->set_next(NULL);
  }
  return BufferNode::make_buffer_from_node(node);
}

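// Note: "deallocation" returns the buffer to the owning qset's free list
// rather than freeing it; memory is only released by reduce_free_list().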
void PtrQueueSet::deallocate_buffer(void** buf) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  BufferNode* node = BufferNode::make_node_from_buffer(buf);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  for (size_t i = 0; i < n; ++i) {
    assert(_buf_free_list != NULL,
           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
    BufferNode* node = _buf_free_list;
    _buf_free_list = node->next();
    _buf_free_list_sz--;
    BufferNode::deallocate(node);
  }
}

void PtrQueue::handle_zero_index() {
  assert(_index == 0, "Precondition.");

  // This thread records the full buffer and allocates a
  // new one (while holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(_index > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQ may be the shared dirty card queue and may be
      // manipulated by more than one worker thread during a pause. Since
      // enqueueing the completed buffer unlocks the Shared_DirtyCardQ_lock,
      // more than one worker thread can 'race' on reading the shared queue
      // attributes (_buf and _index), and multiple threads can call into
      // this routine for the same buffer. That would cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in a
      // local and clearing the field while holding _lock. When _lock is
      // released (while enqueueing the completed buffer) the thread that
      // acquires _lock will skip this code, preventing a subsequent
      // multiple enqueue, and will install a newly allocated buffer below.

      void** buf = _buf;   // local pointer to completed buffer
      _buf = NULL;         // clear shared _buf field

      locking_enqueue_completed_buffer(buf); // enqueue completed buffer

      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
}

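// A Java (mutator) thread processes the buffer itself when the completed
// buffer queue is unbounded (_max_completed_queue == 0) or has grown past
// its padded maximum, rather than letting the queue grow without bound;
// otherwise the buffer is added to the completed buffer list.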
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(buf);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}

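// Appends the buffer to the completed buffer list (CBL); once the list
// reaches _process_completed_threshold buffers, _process_completed is set
// and a waiter on _cbl_mon is notified (if requested).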
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  BufferNode* cbn = BufferNode::make_node_from_buffer(buf);
  cbn->set_index(index);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= (size_t)_process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
|
#include "gc/g1/ptrQueue.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"

#include <new>

PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
  _qset(qset), _buf(NULL), _index(0), _sz(0), _active(active),
  _permanent(permanent), _lock(NULL)
{}

PtrQueue::~PtrQueue() {
  assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
}

void PtrQueue::flush_impl() {
  if (!_permanent && _buf != NULL) {
    BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
    if (is_empty()) {
      // No work to do.
      qset()->deallocate_buffer(node);
    } else {
      qset()->enqueue_complete_buffer(node);
    }
    _buf = NULL;
    _index = 0;
  }
}


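// Buffers fill from the end toward the beginning: _index is a byte offset
// into the buffer that starts at _sz (the capacity in bytes) and counts down
// to 0 as entries are added, so _index == 0 means the buffer is full and
// _index == _sz means it is empty.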
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(_index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= sizeof(void*);
  _buf[byte_index_to_index(_index)] = ptr;
  assert(_index <= _sz, "Invariant.");
}

void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as they
  // have the same rank and we may get the "possible deadlock" message.
  _lock->unlock();

  qset()->enqueue_complete_buffer(node);
  // We must relock only because the caller will unlock, as it does in the
  // normal case.
  _lock->lock_without_safepoint_check();
}


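// A BufferNode and the buffer it describes share a single C-heap block: the
// node header is constructed at the start of the block with placement new,
// and the buffer's storage begins buffer_offset() bytes in.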
BufferNode* BufferNode::allocate(size_t byte_size) {
  assert(byte_size > 0, "precondition");
  assert(is_size_aligned(byte_size, sizeof(void**)),
         "Invalid buffer size " SIZE_FORMAT, byte_size);
  void* data = NEW_C_HEAP_ARRAY(char, buffer_offset() + byte_size, mtGC);
  return new (data) BufferNode;
}

void BufferNode::deallocate(BufferNode* node) {
  node->~BufferNode();
  FREE_C_HEAP_ARRAY(char, node);
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  // ... (initializer list and intervening definitions elided) ...

void** PtrQueueSet::allocate_buffer() {
  assert(_sz > 0, "Didn't set a buffer size.");
  BufferNode* node = NULL;
  {
    MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
    node = _fl_owner->_buf_free_list;
    if (node != NULL) {
      _fl_owner->_buf_free_list = node->next();
      _fl_owner->_buf_free_list_sz--;
    }
  }
  if (node == NULL) {
    node = BufferNode::allocate(_sz);
  } else {
    // Reinitialize buffer obtained from free list.
    node->set_index(0);
    node->set_next(NULL);
  }
  return BufferNode::make_buffer_from_node(node);
}

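// Note: "deallocation" returns the node to the owning qset's free list
// rather than freeing it; memory is only released by reduce_free_list().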
void PtrQueueSet::deallocate_buffer(BufferNode* node) {
  assert(_sz > 0, "Didn't set a buffer size.");
  MutexLockerEx x(_fl_owner->_fl_lock, Mutex::_no_safepoint_check_flag);
  node->set_next(_fl_owner->_buf_free_list);
  _fl_owner->_buf_free_list = node;
  _fl_owner->_buf_free_list_sz++;
}

void PtrQueueSet::reduce_free_list() {
  assert(_fl_owner == this, "Free list reduction is allowed only for the owner");
  // For now we'll adopt the strategy of deleting half.
  MutexLockerEx x(_fl_lock, Mutex::_no_safepoint_check_flag);
  size_t n = _buf_free_list_sz / 2;
  for (size_t i = 0; i < n; ++i) {
    assert(_buf_free_list != NULL,
           "_buf_free_list_sz is wrong: " SIZE_FORMAT, _buf_free_list_sz);
    BufferNode* node = _buf_free_list;
    _buf_free_list = node->next();
    _buf_free_list_sz--;
    BufferNode::deallocate(node);
  }
}

void PtrQueue::handle_zero_index() {
  assert(_index == 0, "Precondition.");

  // This thread records the full buffer and allocates a
  // new one (while holding the lock if there is one).
  if (_buf != NULL) {
    if (!should_enqueue_buffer()) {
      assert(_index > 0, "the buffer can only be re-used if it's not full");
      return;
    }

    if (_lock) {
      assert(_lock->owned_by_self(), "Required.");

      // The current PtrQ may be the shared dirty card queue and may be
      // manipulated by more than one worker thread during a pause. Since
      // enqueueing the completed buffer unlocks the Shared_DirtyCardQ_lock,
      // more than one worker thread can 'race' on reading the shared queue
      // attributes (_buf and _index), and multiple threads can call into
      // this routine for the same buffer. That would cause the completed
      // buffer to be added to the CBL multiple times.

      // We "claim" the current buffer by caching the value of _buf in a
      // local and clearing the field while holding _lock. When _lock is
      // released (while enqueueing the completed buffer) the thread that
      // acquires _lock will skip this code, preventing a subsequent
      // multiple enqueue, and will install a newly allocated buffer below.

      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
      _buf = NULL; // clear shared _buf field

      locking_enqueue_completed_buffer(node); // enqueue completed buffer

      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and inserted it into this pointer
      // queue. If that happens then we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
      if (qset()->process_or_enqueue_complete_buffer(node)) {
        // Recycle the buffer. No allocation.
        assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
        assert(_sz == qset()->buffer_size(), "invariant");
        _index = _sz;
        return;
      }
    }
  }
  // Reallocate the buffer
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
}

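// A Java (mutator) thread processes the buffer itself when the completed
// buffer queue is unbounded (_max_completed_queue == 0) or has grown past
// its padded maximum, rather than letting the queue grow without bound;
// otherwise the buffer is added to the completed buffer list.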
bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      bool b = mut_process_buffer(node);
      if (b) {
        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(node);
  return false;
}

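// Appends the buffer to the completed buffer list (CBL); once the list
// reaches _process_completed_threshold buffers, _process_completed is set
// and a waiter on _cbl_mon is notified (if requested).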
void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
  cbn->set_next(NULL);
  if (_completed_buffers_tail == NULL) {
    assert(_completed_buffers_head == NULL, "Well-formedness");
    _completed_buffers_head = cbn;
    _completed_buffers_tail = cbn;
  } else {
    _completed_buffers_tail->set_next(cbn);
    _completed_buffers_tail = cbn;
  }
  _n_completed_buffers++;

  if (!_process_completed && _process_completed_threshold >= 0 &&
      _n_completed_buffers >= (size_t)_process_completed_threshold) {
    _process_completed = true;
    if (_notify_when_complete) {
      _cbl_mon->notify();
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
}
|