98 }
99 }
100
// Attach a new dirty card queue to `qset`.  `permanent` is forwarded to
// PtrQueue; permanent queues skip the flush in ~DirtyCardQueue.
DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }
106
107 DirtyCardQueue::~DirtyCardQueue() {
108 if (!is_permanent()) {
109 flush();
110 }
111 }
112
113 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
114 bool consume,
115 uint worker_i) {
116 bool res = true;
117 if (_buf != NULL) {
118 res = apply_closure_to_buffer(cl, _buf, _index, _sz,
119 consume,
120 worker_i);
121 if (res && consume) {
122 _index = _sz;
123 }
124 }
125 return res;
126 }
127
128 bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
129 void** buf,
130 size_t index, size_t sz,
131 bool consume,
132 uint worker_i) {
133 if (cl == NULL) return true;
134 size_t limit = byte_index_to_index(sz);
135 for (size_t i = byte_index_to_index(index); i < limit; ++i) {
136 jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
137 if (card_ptr != NULL) {
138 // Set the entry to null, so we don't do it again (via the test
139 // above) if we reconsider this buffer.
140 if (consume) {
141 buf[i] = NULL;
142 }
143 if (!cl->do_card_ptr(card_ptr, worker_i)) {
144 return false;
145 }
146 }
147 }
148 return true;
149 }
150
// Construct a (not-yet-initialized) dirty card queue set; initialize()
// must be called before buffers are processed.
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _mut_process_closure(NULL),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  // Dirty card queues are always active (see DirtyCardQueue's constructor).
  _all_active = true;
}
160
161 // Determines how many mutator threads can process the buffers in parallel.
162 uint DirtyCardQueueSet::num_par_ids() {
163 return (uint)os::processor_count();
164 }
165
// Wire up the queue set: record the mutator-processing closure, forward
// monitor/lock/threshold configuration to PtrQueueSet, size buffers from
// G1UpdateBufferSize, and optionally create the parallel worker-id set.
// NOTE(review): the body uses cbl_mon, fl_lock, process_completed_threshold
// and max_completed_queue, none of which appear in the visible parameter
// list -- parameter lines appear to be missing from this copy; confirm
// against the header.
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
                                   Mutex* lock,
                                   DirtyCardQueueSet* fl_owner,
                                   bool init_free_ids) {
  _mut_process_closure = cl;
  PtrQueueSet::initialize(cbl_mon,
                          fl_lock,
                          process_completed_threshold,
                          max_completed_queue,
                          fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    // Free ids bound how many mutator threads process buffers concurrently
    // (see num_par_ids()).
    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
  }
}
186
187 void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
188 t->dirty_card_queue().handle_zero_index();
189 }
190
// Process a retired buffer on behalf of a mutator thread, applying the
// closure registered in initialize().  A par id is claimed for the
// duration so the closure sees a stable worker index.  Returns true if
// the whole buffer was processed.
bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
  guarantee(_free_ids != NULL, "must be");

  // claim a par id
  uint worker_i = _free_ids->claim_par_id();

  bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
                                                   _sz, true, worker_i);
  if (b) {
    Atomic::inc(&_processed_buffers_mut);
  }

  // release the id
  _free_ids->release_par_id(worker_i);

  return b;
}
208
209
// Pop one node off the completed-buffer list, or return NULL when no more
// than `stop_at` buffers remain (also clearing _process_completed so
// completion notifications stop).
// NOTE(review): `nd` is read (nd->next()) before any visible assignment,
// and there is an unmatched closing brace below -- lines appear to be
// missing from this copy (presumably `nd = _completed_buffers_head;`
// inside a NULL check).  Restore from the original before building.
BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

    _completed_buffers_head = nd->next();
    _n_completed_buffers--;
    if (_completed_buffers_head == NULL) {
      // Emptied the list; tail must be cleared to stay consistent.
      assert(_n_completed_buffers == 0, "Invariant");
      _completed_buffers_tail = NULL;
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  return nd;
}
232
233 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
234 uint worker_i,
235 size_t stop_at,
236 bool during_pause) {
237 assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
238 BufferNode* nd = get_completed_buffer(stop_at);
239 if (nd == NULL) {
240 return false;
241 } else {
242 void** buf = BufferNode::make_buffer_from_node(nd);
243 size_t index = nd->index();
244 if (DirtyCardQueue::apply_closure_to_buffer(cl,
245 buf, index, _sz,
246 true, worker_i)) {
247 // Done with fully processed buffer.
248 deallocate_buffer(buf);
249 Atomic::inc(&_processed_buffers_rs_thread);
250 return true;
251 } else {
252 // Return partially processed buffer to the queue.
253 enqueue_complete_buffer(buf, index);
254 return false;
255 }
256 }
257 }
258
259 void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
260 BufferNode* nd = _completed_buffers_head;
261 while (nd != NULL) {
262 bool b =
263 DirtyCardQueue::apply_closure_to_buffer(cl,
264 BufferNode::make_buffer_from_node(nd),
265 0, _sz, false);
266 guarantee(b, "Should not stop early.");
267 nd = nd->next();
268 }
269 }
270
// Parallel, non-destructive application of `cl` to the completed-buffer
// list.  Workers race to claim nodes from the shared cursor
// _cur_par_buffer_node: a successful cmpxchg advances the cursor past the
// claimed node, so each node is processed by exactly one worker.
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = (BufferNode*)nd->next();
    // Try to claim `nd` by swinging the shared cursor from `nd` to `next`.
    BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd);
    if (actual == nd) {
      // Claimed.  consume == false: entries are left intact.
      bool b =
        DirtyCardQueue::apply_closure_to_buffer(cl,
                                                BufferNode::make_buffer_from_node(actual),
                                                0, _sz, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      // Lost the race; resume from the cursor value the winner published.
      nd = actual;
    }
  }
}
288
// Deallocates any completed log buffers
void DirtyCardQueueSet::clear() {
  BufferNode* buffers_to_delete = NULL;
  {
    // Detach the entire completed list while holding the lock ...
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _n_completed_buffers = 0;
    _completed_buffers_tail = NULL;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  // ... then free the detached nodes outside the critical section.
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(BufferNode::make_buffer_from_node(nd));
  }

}
311
312 void DirtyCardQueueSet::abandon_logs() {
313 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
314 clear();
315 // Since abandon is done only at safepoints, we can safely manipulate
316 // these queues.
317 for (JavaThread* t = Threads::first(); t; t = t->next()) {
318 t->dirty_card_queue().reset();
319 }
320 shared_dirty_card_queue()->reset();
321 }
322
323
// Move every non-empty per-thread log (and the shared log) onto the global
// completed-buffer list.  Safepoint-only.
void DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads, if we find a partial log add it to
  // the global list of logs. Temporarily turn off the limit on the number
  // of outstanding buffers.
  int save_max_completed_queue = _max_completed_queue;
  _max_completed_queue = max_jint;
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
  for (JavaThread* t = Threads::first(); t; t = t->next()) {
    DirtyCardQueue& dcq = t->dirty_card_queue();
    if (dcq.size() != 0) {
      void** buf = dcq.get_buf();
      // We must NULL out the unused entries, then enqueue.
      size_t limit = dcq.byte_index_to_index(dcq.get_index());
      for (size_t i = 0; i < limit; ++i) {
        buf[i] = NULL;
      }
      enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
      dcq.reinitialize();
    }
  }
  if (_shared_dirty_card_queue.size() != 0) {
    // NOTE(review): unlike the per-thread path above, unused entries of
    // the shared queue are not NULLed out here -- confirm this asymmetry
    // is intentional.
    enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(),
                            _shared_dirty_card_queue.get_index());
    _shared_dirty_card_queue.reinitialize();
  }
  // Restore the completed buffer queue limit.
  _max_completed_queue = save_max_completed_queue;
}
|
98 }
99 }
100
// Attach a new dirty card queue to `qset`.  `permanent` is forwarded to
// PtrQueue; permanent queues skip the flush in ~DirtyCardQueue.
DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, permanent, true /* active */)
{ }
106
107 DirtyCardQueue::~DirtyCardQueue() {
108 if (!is_permanent()) {
109 flush();
110 }
111 }
112
113 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
114 bool consume,
115 uint worker_i) {
116 bool res = true;
117 if (_buf != NULL) {
118 BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
119 res = apply_closure_to_buffer(cl, node, _sz, consume, worker_i);
120 if (res && consume) {
121 _index = _sz;
122 }
123 }
124 return res;
125 }
126
// Apply `cl` to the unprocessed entries of `node`'s buffer, i.e. those in
// [node->index(), buffer_size) (byte offsets).  With `consume`, the node's
// index is advanced past every processed entry so a partially processed
// buffer can be resumed later.  Returns false iff the closure stopped
// early.
bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
                                             BufferNode* node,
                                             size_t buffer_size,
                                             bool consume,
                                             uint worker_i) {
  if (cl == NULL) return true;
  void** buf = BufferNode::make_buffer_from_node(node);
  size_t limit = byte_index_to_index(buffer_size);
  for (size_t i = byte_index_to_index(node->index()); i < limit; ++i) {
    jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
    // Entries are never NULLed out in this protocol; every slot from the
    // index onward must hold a card pointer.
    assert(card_ptr != NULL, "invariant");
    if (!cl->do_card_ptr(card_ptr, worker_i)) {
      if (consume) {
        // Record partial progress: entries up to and including i are done.
        size_t new_index = index_to_byte_index(i + 1);
        assert(new_index <= buffer_size, "invariant");
        node->set_index(new_index);
      }
      return false;
    }
  }
  if (consume) {
    // Everything processed; mark the buffer empty.
    node->set_index(buffer_size);
  }
  return true;
}
152
// Construct a (not-yet-initialized) dirty card queue set; initialize()
// must be called before buffers are processed.
DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
  PtrQueueSet(notify_when_complete),
  _mut_process_closure(NULL),
  _shared_dirty_card_queue(this, true /* permanent */),
  _free_ids(NULL),
  _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
{
  // Dirty card queues are always active (see DirtyCardQueue's constructor).
  _all_active = true;
}
162
163 // Determines how many mutator threads can process the buffers in parallel.
164 uint DirtyCardQueueSet::num_par_ids() {
165 return (uint)os::processor_count();
166 }
167
// Wire up the queue set: record the mutator-processing closure, forward
// monitor/lock/threshold configuration to PtrQueueSet, size buffers from
// G1UpdateBufferSize, and optionally create the parallel worker-id set.
// NOTE(review): the body uses cbl_mon, fl_lock, process_completed_threshold
// and max_completed_queue, none of which appear in the visible parameter
// list -- parameter lines appear to be missing from this copy; confirm
// against the header.
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
                                   Mutex* lock,
                                   DirtyCardQueueSet* fl_owner,
                                   bool init_free_ids) {
  _mut_process_closure = cl;
  PtrQueueSet::initialize(cbl_mon,
                          fl_lock,
                          process_completed_threshold,
                          max_completed_queue,
                          fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    // Free ids bound how many mutator threads process buffers concurrently
    // (see num_par_ids()).
    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
  }
}
188
189 void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
190 t->dirty_card_queue().handle_zero_index();
191 }
192
// Process one retired buffer on behalf of a mutator thread, applying the
// closure registered in initialize().  A par id is claimed for the
// duration so the closure sees a stable worker index.  Returns true if
// the whole buffer was processed.
bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  guarantee(_free_ids != NULL, "must be");

  // claim a par id
  uint worker_i = _free_ids->claim_par_id();

  bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure,
                                                   node, _sz,
                                                   true, worker_i);
  if (b) {
    Atomic::inc(&_processed_buffers_mut);
  }

  // release the id
  _free_ids->release_par_id(worker_i);

  return b;
}
211
212
// Pop one node off the completed-buffer list, or return NULL when no more
// than `stop_at` buffers remain (also clearing _process_completed so
// completion notifications stop).
// NOTE(review): `nd` is read (nd->next()) before any visible assignment,
// and there is an unmatched closing brace below -- lines appear to be
// missing from this copy (presumably `nd = _completed_buffers_head;`
// inside a NULL check).  Restore from the original before building.
BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  BufferNode* nd = NULL;
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    _process_completed = false;
    return NULL;
  }

    _completed_buffers_head = nd->next();
    _n_completed_buffers--;
    if (_completed_buffers_head == NULL) {
      // Emptied the list; tail must be cleared to stay consistent.
      assert(_n_completed_buffers == 0, "Invariant");
      _completed_buffers_tail = NULL;
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  return nd;
}
235
236 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
237 uint worker_i,
238 size_t stop_at,
239 bool during_pause) {
240 assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
241 BufferNode* nd = get_completed_buffer(stop_at);
242 if (nd == NULL) {
243 return false;
244 } else {
245 if (DirtyCardQueue::apply_closure_to_buffer(cl, nd, _sz, true, worker_i)) {
246 // Done with fully processed buffer.
247 deallocate_buffer(nd);
248 Atomic::inc(&_processed_buffers_rs_thread);
249 return true;
250 } else {
251 // Return partially processed buffer to the queue.
252 enqueue_complete_buffer(nd);
253 return false;
254 }
255 }
256 }
257
// Parallel, non-destructive application of `cl` to the completed-buffer
// list.  Workers race to claim nodes from the shared cursor
// _cur_par_buffer_node: a successful cmpxchg advances the cursor past the
// claimed node, so each node is processed by exactly one worker.
void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
  BufferNode* nd = _cur_par_buffer_node;
  while (nd != NULL) {
    BufferNode* next = nd->next();
    // Try to claim `nd` by swinging the shared cursor from `nd` to `next`.
    void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
    if (actual == nd) {
      // Claimed.  consume == false: indices/entries are left intact.
      bool b = DirtyCardQueue::apply_closure_to_buffer(cl, nd, _sz, false);
      guarantee(b, "Should not stop early.");
      nd = next;
    } else {
      // Lost the race; resume from the cursor value the winner published.
      nd = static_cast<BufferNode*>(actual);
    }
  }
}
272
// Deallocates any completed log buffers
void DirtyCardQueueSet::clear() {
  BufferNode* buffers_to_delete = NULL;
  {
    // Detach the entire completed list while holding the lock ...
    MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
    while (_completed_buffers_head != NULL) {
      BufferNode* nd = _completed_buffers_head;
      _completed_buffers_head = nd->next();
      nd->set_next(buffers_to_delete);
      buffers_to_delete = nd;
    }
    _n_completed_buffers = 0;
    _completed_buffers_tail = NULL;
    DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  }
  // ... then free the detached nodes outside the critical section.
  while (buffers_to_delete != NULL) {
    BufferNode* nd = buffers_to_delete;
    buffers_to_delete = nd->next();
    deallocate_buffer(nd);
  }

}
295
296 void DirtyCardQueueSet::abandon_logs() {
297 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
298 clear();
299 // Since abandon is done only at safepoints, we can safely manipulate
300 // these queues.
301 for (JavaThread* t = Threads::first(); t; t = t->next()) {
302 t->dirty_card_queue().reset();
303 }
304 shared_dirty_card_queue()->reset();
305 }
306
307 void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
308 if (!dcq.is_empty()) {
309 enqueue_complete_buffer(
310 BufferNode::make_node_from_buffer(dcq.get_buf(), dcq.get_index()));
311 dcq.reinitialize();
312 }
313 }
314
315 void DirtyCardQueueSet::concatenate_logs() {
316 // Iterate over all the threads, if we find a partial log add it to
317 // the global list of logs. Temporarily turn off the limit on the number
318 // of outstanding buffers.
319 int save_max_completed_queue = _max_completed_queue;
320 _max_completed_queue = max_jint;
321 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
322 for (JavaThread* t = Threads::first(); t; t = t->next()) {
323 concatenate_log(t->dirty_card_queue());
324 }
325 concatenate_log(_shared_dirty_card_queue);
326 // Restore the completed buffer queue limit.
327 _max_completed_queue = save_max_completed_queue;
328 }
|