54
55
56 void PtrQueue::enqueue_known_active(void* ptr) {
57 assert(0 <= _index && _index <= _sz, "Invariant.");
58 assert(_index == 0 || _buf != NULL, "invariant");
59
60 while (_index == 0) {
61 handle_zero_index();
62 }
63
64 assert(_index > 0, "postcondition");
65 _index -= oopSize;
66 _buf[byte_index_to_index((int)_index)] = ptr;
67 assert(0 <= _index && _index <= _sz, "Invariant.");
68 }
69
70 void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
71 assert(_lock->owned_by_self(), "Required.");
72
73 // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
74 // we acquire DirtyCardQ_CBL_mon inside enqeue_complete_buffer as they
75 // have the same rank and we may get the "possible deadlock" message
76 _lock->unlock();
77
78 qset()->enqueue_complete_buffer(buf);
79 // We must relock only because the caller will unlock, for the normal
80 // case.
81 _lock->lock_without_safepoint_check();
82 }
83
84
85 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
86 _max_completed_queue(0),
87 _cbl_mon(NULL), _fl_lock(NULL),
88 _notify_when_complete(notify_when_complete),
89 _sz(0),
90 _completed_buffers_head(NULL),
91 _completed_buffers_tail(NULL),
92 _n_completed_buffers(0),
93 _process_completed_threshold(0), _process_completed(false),
94 _buf_free_list(NULL), _buf_free_list_sz(0)
134 n--;
135 }
136 }
137
138 void PtrQueue::handle_zero_index() {
139 assert(_index == 0, "Precondition.");
140
141 // This thread records the full buffer and allocates a new one (while
142 // holding the lock if there is one).
143 if (_buf != NULL) {
144 if (!should_enqueue_buffer()) {
145 assert(_index > 0, "the buffer can only be re-used if it's not full");
146 return;
147 }
148
149 if (_lock) {
150 assert(_lock->owned_by_self(), "Required.");
151
152 // The current PtrQ may be the shared dirty card queue and
153 // may be being manipulated by more than one worker thread
154 // during a pause. Since the enqueuing of the completed
155 // buffer unlocks the Shared_DirtyCardQ_lock more than one
156 // worker thread can 'race' on reading the shared queue attributes
157 // (_buf and _index) and multiple threads can call into this
158 // routine for the same buffer. This will cause the completed
159 // buffer to be added to the CBL multiple times.
160
161 // We "claim" the current buffer by caching value of _buf in
162 // a local and clearing the field while holding _lock. When
163 // _lock is released (while enqueueing the completed buffer)
164 // the thread that acquires _lock will skip this code,
165    // preventing the subsequent multiple enqueue, and
166 // install a newly allocated buffer below.
167
168 void** buf = _buf; // local pointer to completed buffer
169 _buf = NULL; // clear shared _buf field
170
171 locking_enqueue_completed_buffer(buf); // enqueue completed buffer
172
173 // While the current thread was enqueuing the buffer another thread
174    // may have allocated a new buffer and inserted it into this pointer
175 // queue. If that happens then we just return so that the current
176 // thread doesn't overwrite the buffer allocated by the other thread
177    // and potentially lose some dirtied cards.
178
179 if (_buf != NULL) return;
180 } else {
181 if (qset()->process_or_enqueue_complete_buffer(_buf)) {
182 // Recycle the buffer. No allocation.
183 _sz = qset()->buffer_size();
184 _index = _sz;
185 return;
186 }
187 }
188 }
189 // Reallocate the buffer
190 _buf = qset()->allocate_buffer();
191 _sz = qset()->buffer_size();
192 _index = _sz;
193 assert(0 <= _index && _index <= _sz, "Invariant.");
|
54
55
56 void PtrQueue::enqueue_known_active(void* ptr) {
57 assert(0 <= _index && _index <= _sz, "Invariant.");
58 assert(_index == 0 || _buf != NULL, "invariant");
59
60 while (_index == 0) {
61 handle_zero_index();
62 }
63
64 assert(_index > 0, "postcondition");
65 _index -= oopSize;
66 _buf[byte_index_to_index((int)_index)] = ptr;
67 assert(0 <= _index && _index <= _sz, "Invariant.");
68 }
69
70 void PtrQueue::locking_enqueue_completed_buffer(void** buf) {
71 assert(_lock->owned_by_self(), "Required.");
72
73 // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
74 // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
75 // have the same rank and we may get the "possible deadlock" message
76 _lock->unlock();
77
78 qset()->enqueue_complete_buffer(buf);
79 // We must relock only because the caller will unlock, for the normal
80 // case.
81 _lock->lock_without_safepoint_check();
82 }
83
84
85 PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
86 _max_completed_queue(0),
87 _cbl_mon(NULL), _fl_lock(NULL),
88 _notify_when_complete(notify_when_complete),
89 _sz(0),
90 _completed_buffers_head(NULL),
91 _completed_buffers_tail(NULL),
92 _n_completed_buffers(0),
93 _process_completed_threshold(0), _process_completed(false),
94 _buf_free_list(NULL), _buf_free_list_sz(0)
134 n--;
135 }
136 }
137
138 void PtrQueue::handle_zero_index() {
139 assert(_index == 0, "Precondition.");
140
141 // This thread records the full buffer and allocates a new one (while
142 // holding the lock if there is one).
143 if (_buf != NULL) {
144 if (!should_enqueue_buffer()) {
145 assert(_index > 0, "the buffer can only be re-used if it's not full");
146 return;
147 }
148
149 if (_lock) {
150 assert(_lock->owned_by_self(), "Required.");
151
152 // The current PtrQ may be the shared dirty card queue and
153 // may be being manipulated by more than one worker thread
154 // during a pause. Since the enqueueing of the completed
155 // buffer unlocks the Shared_DirtyCardQ_lock more than one
156 // worker thread can 'race' on reading the shared queue attributes
157 // (_buf and _index) and multiple threads can call into this
158 // routine for the same buffer. This will cause the completed
159 // buffer to be added to the CBL multiple times.
160
161 // We "claim" the current buffer by caching value of _buf in
162 // a local and clearing the field while holding _lock. When
163 // _lock is released (while enqueueing the completed buffer)
164 // the thread that acquires _lock will skip this code,
165    // preventing the subsequent multiple enqueue, and
166 // install a newly allocated buffer below.
167
168 void** buf = _buf; // local pointer to completed buffer
169 _buf = NULL; // clear shared _buf field
170
171 locking_enqueue_completed_buffer(buf); // enqueue completed buffer
172
173 // While the current thread was enqueueing the buffer another thread
174    // may have allocated a new buffer and inserted it into this pointer
175 // queue. If that happens then we just return so that the current
176 // thread doesn't overwrite the buffer allocated by the other thread
177    // and potentially lose some dirtied cards.
178
179 if (_buf != NULL) return;
180 } else {
181 if (qset()->process_or_enqueue_complete_buffer(_buf)) {
182 // Recycle the buffer. No allocation.
183 _sz = qset()->buffer_size();
184 _index = _sz;
185 return;
186 }
187 }
188 }
189 // Reallocate the buffer
190 _buf = qset()->allocate_buffer();
191 _sz = qset()->buffer_size();
192 _index = _sz;
193 assert(0 <= _index && _index <= _sz, "Invariant.");
|