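// Flush this queue's thread-local buffer back to the queue set. The queue
// fills from the top of the buffer downward, so _index == _sz means the
// buffer is untouched and can simply be returned to the free list; a
// partially filled buffer is enqueued for processing after its unused low
// slots are NULLed out, presumably so the consumer can recognize and skip
// them. Permanent (_perm) queues keep their buffer.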
void PtrQueue::flush_impl() {
  if (!_perm && _buf != NULL) {
    if (_index == _sz) {
      // No work to do.
      qset()->deallocate_buffer(_buf);
    } else {
      // We must NULL out the unused entries, then enqueue.
      for (size_t i = 0; i < _index; i += oopSize) {
        _buf[byte_index_to_index((int)i)] = NULL;
      }
      qset()->enqueue_complete_buffer(_buf);
    }
    _buf = NULL;
    _index = 0;
  }
}

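// _index counts bytes, not slots, and runs from _sz (empty) down to 0
// (full); byte_index_to_index() converts it to an array subscript. As a
// sketch, assuming a 64-bit VM (oopSize == 8) and _sz == 256 bytes: an
// empty queue has _index == 256, the first enqueue stores into
// _buf[byte_index_to_index(248)] == _buf[31], the last into _buf[0], and
// the next enqueue finds _index == 0 and calls handle_zero_index().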
void PtrQueue::enqueue_known_active(void* ptr) {
  assert(_index <= _sz, "Invariant.");
  assert(_index == 0 || _buf != NULL, "invariant");

  while (_index == 0) {
    handle_zero_index();
  }

  assert(_index > 0, "postcondition");
  _index -= oopSize;
  _buf[byte_index_to_index((int)_index)] = ptr;
  assert(_index <= _sz, "Invariant.");
}

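// Precondition: the caller holds _lock (asserted below). Dropping _lock
// around the enqueue opens a window in which another thread can install a
// fresh buffer into this queue; a caller such as handle_zero_index()
// re-checks _buf for exactly that race afterwards (see below).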
void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
  assert(_lock->owned_by_self(), "Required.");

  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer, as the
  // two have the same rank and we would otherwise hit the "possible
  // deadlock" assertion.
  _lock->unlock();

  qset()->enqueue_complete_buffer(buf);
  // We must relock because the caller expects to hold the lock on return
  // and will itself unlock it in the normal case.
  _lock->lock_without_safepoint_check();
}

PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
  _max_completed_queue(0),
  _cbl_mon(NULL), _fl_lock(NULL),
  // ... (the rest of the initializer list and the following definitions
  // are elided from this excerpt) ...

// The excerpt resumes in the tail of PtrQueue::handle_zero_index(), just
// after the full buffer has been handed to
// locking_enqueue_completed_buffer() while holding _lock:
      // While the current thread was enqueueing the buffer, another thread
      // may have allocated a new buffer and installed it in this pointer
      // queue. If that happens then we just return, so that the current
      // thread doesn't overwrite the buffer allocated by the other thread
      // and potentially lose some dirtied cards.

      if (_buf != NULL) return;
    } else {
      if (qset()->process_or_enqueue_complete_buffer(_buf)) {
        // Recycle the buffer. No allocation.
        _sz = qset()->buffer_size();
        _index = _sz;
        return;
      }
    }
  }
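  // We only get here if the old buffer (if any) was enqueued above, so
  // install a fresh, empty buffer; _index == _sz marks it empty because
  // the queue fills downward.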
  // Reallocate the buffer.
  _buf = qset()->allocate_buffer();
  _sz = qset()->buffer_size();
  _index = _sz;
  assert(_index <= _sz, "Invariant.");
}

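// Back-pressure: a Java thread that fills a buffer first tries to process
// it itself rather than enqueue it, but only when _max_completed_queue == 0
// (always process inline) or when the completed-buffer list has grown past
// its limit plus padding. With illustrative values (not the defaults):
// _max_completed_queue == 10 and _completed_queue_padding == 2 means a
// mutator starts processing its own buffers once 12 or more completed
// buffers are pending.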
bool PtrQueueSet::process_or_enqueue_complete_buffer(void** buf) {
  if (Thread::current()->is_Java_thread()) {
    // We don't lock. It is fine to be epsilon-precise here.
    if (_max_completed_queue == 0 ||
        (_max_completed_queue > 0 &&
         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
      if (mut_process_buffer(buf)) {
        // The buffer hasn't been deallocated and the caller may reuse it.
        return true;
      }
    }
  }
  // The buffer will be enqueued. The caller will have to get a new one.
  enqueue_complete_buffer(buf);
  return false;
}

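// Presumably links buf onto the shared completed-buffer list under
// _cbl_mon (DirtyCardQ_CBL_mon for the dirty-card queue set, per the
// lock-rank comment in locking_enqueue_completed_buffer() above) and, if
// _notify_when_complete is set, notifies waiters.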
void PtrQueueSet::enqueue_complete_buffer(void** buf, size_t index) {