98 }
99 }
100
101 DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
102 // Dirty card queues are always active, so we create them with their
103 // active field set to true.
104 PtrQueue(qset, permanent, true /* active */)
105 { }
106
107 DirtyCardQueue::~DirtyCardQueue() {
108 if (!is_permanent()) {
109 flush();
110 }
111 }
112
113 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
114 bool consume,
115 uint worker_i) {
116 bool res = true;
117 if (_buf != NULL) {
118 res = apply_closure_to_buffer(cl, _buf, _index, _sz,
119 consume,
120 worker_i);
121 if (res && consume) {
122 _index = _sz;
123 }
124 }
125 return res;
126 }
127
128 bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
129 void** buf,
130 size_t index, size_t sz,
131 bool consume,
132 uint worker_i) {
133 if (cl == NULL) return true;
134 size_t limit = byte_index_to_index(sz);
135 for (size_t i = byte_index_to_index(index); i < limit; ++i) {
136 jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
137 if (card_ptr != NULL) {
138 // Set the entry to null, so we don't do it again (via the test
139 // above) if we reconsider this buffer.
140 if (consume) {
141 buf[i] = NULL;
142 }
143 if (!cl->do_card_ptr(card_ptr, worker_i)) {
144 return false;
145 }
146 }
147 }
148 return true;
149 }
150
151 DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
152 PtrQueueSet(notify_when_complete),
153 _mut_process_closure(NULL),
154 _shared_dirty_card_queue(this, true /* permanent */),
155 _free_ids(NULL),
156 _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
157 {
158 _all_active = true;
159 }
160
161 // Determines how many mutator threads can process the buffers in parallel.
162 uint DirtyCardQueueSet::num_par_ids() {
163 return (uint)os::processor_count();
164 }
165
// Configures the set: installs the closure used for mutator-side buffer
// processing, forwards the monitor/lock/threshold configuration to
// PtrQueueSet::initialize, sizes buffers from G1UpdateBufferSize, hands
// the shared queue its lock, and (optionally) creates the FreeIdSet
// from which mutator threads claim parallel worker ids.
// NOTE(review): the parameter list appears truncated in this extract --
// cbl_mon, fl_lock, process_completed_threshold and max_completed_queue
// are used below but not visible in the signature (the embedded source
// line numbers jump from 166 to 171).  Confirm against the header
// declaration before modifying.
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
                                   Mutex* lock,
                                   DirtyCardQueueSet* fl_owner,
                                   bool init_free_ids) {
  _mut_process_closure = cl;
  PtrQueueSet::initialize(cbl_mon,
                          fl_lock,
                          process_completed_threshold,
                          max_completed_queue,
                          fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    // Ids are parceled out under the completed-buffer-list monitor.
    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
  }
}
186
187 void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
188 t->dirty_card_queue().handle_zero_index();
189 }
190
191 bool DirtyCardQueueSet::mut_process_buffer(void** buf) {
192 guarantee(_free_ids != NULL, "must be");
193
194 // claim a par id
195 uint worker_i = _free_ids->claim_par_id();
196
197 bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure, buf, 0,
198 _sz, true, worker_i);
199 if (b) {
200 Atomic::inc(&_processed_buffers_mut);
201 }
202
203 // release the id
204 _free_ids->release_par_id(worker_i);
205
206 return b;
207 }
208
209
// Unlinks and returns the head of the completed-buffer list, or NULL
// when no more than stop_at buffers remain (also clearing
// _process_completed in that case).  The caller takes ownership of the
// returned node.
// NOTE(review): the embedded source line numbers jump from 218 to 222,
// so the lines that NULL-check _completed_buffers_head and presumably
// assign it to nd are elided from this extract; the unmatched '}' below
// closes that elided if-block.  Confirm against the full source before
// modifying.
BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  BufferNode* nd = NULL;
  // All completed-list manipulation happens under _cbl_mon.
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    // Leave the requested number of buffers behind and tell callers to
    // stop polling for completed buffers.
    _process_completed = false;
    return NULL;
  }

    // [lines elided in this extract -- see NOTE above]
    _completed_buffers_head = nd->next();
    _n_completed_buffers--;
    if (_completed_buffers_head == NULL) {
      assert(_n_completed_buffers == 0, "Invariant");
      _completed_buffers_tail = NULL;
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  return nd;
}
232
233 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
234 uint worker_i,
235 size_t stop_at,
236 bool during_pause) {
237 assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
238 BufferNode* nd = get_completed_buffer(stop_at);
239 if (nd == NULL) {
240 return false;
241 } else {
242 void** buf = BufferNode::make_buffer_from_node(nd);
243 size_t index = nd->index();
244 if (DirtyCardQueue::apply_closure_to_buffer(cl,
245 buf, index, _sz,
246 true, worker_i)) {
247 // Done with fully processed buffer.
248 deallocate_buffer(buf);
249 Atomic::inc(&_processed_buffers_rs_thread);
250 return true;
251 } else {
252 // Return partially processed buffer to the queue.
253 enqueue_complete_buffer(buf, index);
254 return false;
255 }
256 }
257 }
258
259 void DirtyCardQueueSet::apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
260 BufferNode* nd = _completed_buffers_head;
261 while (nd != NULL) {
262 bool b =
263 DirtyCardQueue::apply_closure_to_buffer(cl,
264 BufferNode::make_buffer_from_node(nd),
265 0, _sz, false);
266 guarantee(b, "Should not stop early.");
267 nd = nd->next();
268 }
269 }
270
271 void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
272 BufferNode* nd = _cur_par_buffer_node;
273 while (nd != NULL) {
274 BufferNode* next = (BufferNode*)nd->next();
275 BufferNode* actual = (BufferNode*)Atomic::cmpxchg_ptr((void*)next, (volatile void*)&_cur_par_buffer_node, (void*)nd);
276 if (actual == nd) {
277 bool b =
278 DirtyCardQueue::apply_closure_to_buffer(cl,
279 BufferNode::make_buffer_from_node(actual),
280 0, _sz, false);
281 guarantee(b, "Should not stop early.");
282 nd = next;
283 } else {
284 nd = actual;
285 }
286 }
287 }
288
289 // Deallocates any completed log buffers
290 void DirtyCardQueueSet::clear() {
291 BufferNode* buffers_to_delete = NULL;
292 {
293 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
294 while (_completed_buffers_head != NULL) {
295 BufferNode* nd = _completed_buffers_head;
296 _completed_buffers_head = nd->next();
297 nd->set_next(buffers_to_delete);
298 buffers_to_delete = nd;
299 }
300 _n_completed_buffers = 0;
301 _completed_buffers_tail = NULL;
302 DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
303 }
304 while (buffers_to_delete != NULL) {
305 BufferNode* nd = buffers_to_delete;
306 buffers_to_delete = nd->next();
307 deallocate_buffer(BufferNode::make_buffer_from_node(nd));
308 }
309
310 }
311
312 void DirtyCardQueueSet::abandon_logs() {
313 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
314 clear();
315 // Since abandon is done only at safepoints, we can safely manipulate
316 // these queues.
317 for (JavaThread* t = Threads::first(); t; t = t->next()) {
318 t->dirty_card_queue().reset();
319 }
320 shared_dirty_card_queue()->reset();
321 }
322
323
324 void DirtyCardQueueSet::concatenate_logs() {
325 // Iterate over all the threads, if we find a partial log add it to
326 // the global list of logs. Temporarily turn off the limit on the number
327 // of outstanding buffers.
328 int save_max_completed_queue = _max_completed_queue;
329 _max_completed_queue = max_jint;
330 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
331 for (JavaThread* t = Threads::first(); t; t = t->next()) {
332 DirtyCardQueue& dcq = t->dirty_card_queue();
333 if (dcq.size() != 0) {
334 void** buf = dcq.get_buf();
335 // We must NULL out the unused entries, then enqueue.
336 size_t limit = dcq.byte_index_to_index(dcq.get_index());
337 for (size_t i = 0; i < limit; ++i) {
338 buf[i] = NULL;
339 }
340 enqueue_complete_buffer(dcq.get_buf(), dcq.get_index());
341 dcq.reinitialize();
342 }
343 }
344 if (_shared_dirty_card_queue.size() != 0) {
345 enqueue_complete_buffer(_shared_dirty_card_queue.get_buf(),
346 _shared_dirty_card_queue.get_index());
347 _shared_dirty_card_queue.reinitialize();
348 }
349 // Restore the completed buffer queue limit.
350 _max_completed_queue = save_max_completed_queue;
351 }
|
98 }
99 }
100
101 DirtyCardQueue::DirtyCardQueue(DirtyCardQueueSet* qset, bool permanent) :
102 // Dirty card queues are always active, so we create them with their
103 // active field set to true.
104 PtrQueue(qset, permanent, true /* active */)
105 { }
106
107 DirtyCardQueue::~DirtyCardQueue() {
108 if (!is_permanent()) {
109 flush();
110 }
111 }
112
113 bool DirtyCardQueue::apply_closure(CardTableEntryClosure* cl,
114 bool consume,
115 uint worker_i) {
116 bool res = true;
117 if (_buf != NULL) {
118 BufferNode* node = BufferNode::make_node_from_buffer(_buf, _index);
119 res = apply_closure_to_buffer(cl, node, _sz, consume, worker_i);
120 if (res && consume) {
121 _index = _sz;
122 }
123 }
124 return res;
125 }
126
127 bool DirtyCardQueue::apply_closure_to_buffer(CardTableEntryClosure* cl,
128 BufferNode* node, size_t sz,
129 bool consume,
130 uint worker_i) {
131 if (cl == NULL) return true;
132 void** buf = BufferNode::make_buffer_from_node(node);
133 size_t limit = byte_index_to_index(sz);
134 for (size_t i = byte_index_to_index(node->index()); i < limit; ++i) {
135 jbyte* card_ptr = static_cast<jbyte*>(buf[i]);
136 assert(card_ptr != NULL, "invariant");
137 if (!cl->do_card_ptr(card_ptr, worker_i)) {
138 if (consume) {
139 size_t new_index = index_to_byte_index(i + 1);
140 assert(new_index <= sz, "invariant");
141 node->set_index(new_index);
142 }
143 return false;
144 }
145 }
146 if (consume) {
147 node->set_index(sz);
148 }
149 return true;
150 }
151
152 DirtyCardQueueSet::DirtyCardQueueSet(bool notify_when_complete) :
153 PtrQueueSet(notify_when_complete),
154 _mut_process_closure(NULL),
155 _shared_dirty_card_queue(this, true /* permanent */),
156 _free_ids(NULL),
157 _processed_buffers_mut(0), _processed_buffers_rs_thread(0)
158 {
159 _all_active = true;
160 }
161
162 // Determines how many mutator threads can process the buffers in parallel.
163 uint DirtyCardQueueSet::num_par_ids() {
164 return (uint)os::processor_count();
165 }
166
// Configures the set: installs the mutator-processing closure, forwards
// the monitor/lock/threshold configuration to PtrQueueSet::initialize,
// sizes buffers from G1UpdateBufferSize, hands the shared queue its
// lock, and (optionally) creates the FreeIdSet used to hand parallel
// worker ids to mutator threads.
// NOTE(review): the parameter list appears truncated in this extract --
// cbl_mon, fl_lock, process_completed_threshold and max_completed_queue
// are used below but not visible in the signature (the embedded source
// line numbers jump from 167 to 172).  Confirm against the header
// declaration before modifying.
void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl,
                                   Mutex* lock,
                                   DirtyCardQueueSet* fl_owner,
                                   bool init_free_ids) {
  _mut_process_closure = cl;
  PtrQueueSet::initialize(cbl_mon,
                          fl_lock,
                          process_completed_threshold,
                          max_completed_queue,
                          fl_owner);
  set_buffer_size(G1UpdateBufferSize);
  _shared_dirty_card_queue.set_lock(lock);
  if (init_free_ids) {
    // Ids are parceled out under the completed-buffer-list monitor.
    _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
  }
}
187
188 void DirtyCardQueueSet::handle_zero_index_for_thread(JavaThread* t) {
189 t->dirty_card_queue().handle_zero_index();
190 }
191
192 bool DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
193 guarantee(_free_ids != NULL, "must be");
194
195 // claim a par id
196 uint worker_i = _free_ids->claim_par_id();
197
198 bool b = DirtyCardQueue::apply_closure_to_buffer(_mut_process_closure,
199 node, _sz,
200 true, worker_i);
201 if (b) {
202 Atomic::inc(&_processed_buffers_mut);
203 }
204
205 // release the id
206 _free_ids->release_par_id(worker_i);
207
208 return b;
209 }
210
211
// Unlinks and returns the head of the completed-buffer list, or NULL
// when no more than stop_at buffers remain (also clearing
// _process_completed in that case).  The caller takes ownership of the
// returned node.
// NOTE(review): the embedded source line numbers jump from 220 to 224,
// so the lines that NULL-check _completed_buffers_head and presumably
// assign it to nd are elided from this extract; the unmatched '}' below
// closes that elided if-block.  Confirm against the full source before
// modifying.
BufferNode* DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  BufferNode* nd = NULL;
  // All completed-list manipulation happens under _cbl_mon.
  MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);

  if (_n_completed_buffers <= stop_at) {
    // Leave the requested number of buffers behind and tell callers to
    // stop polling for completed buffers.
    _process_completed = false;
    return NULL;
  }

    // [lines elided in this extract -- see NOTE above]
    _completed_buffers_head = nd->next();
    _n_completed_buffers--;
    if (_completed_buffers_head == NULL) {
      assert(_n_completed_buffers == 0, "Invariant");
      _completed_buffers_tail = NULL;
    }
  }
  DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
  return nd;
}
234
235 bool DirtyCardQueueSet::apply_closure_to_completed_buffer(CardTableEntryClosure* cl,
236 uint worker_i,
237 size_t stop_at,
238 bool during_pause) {
239 assert(!during_pause || stop_at == 0, "Should not leave any completed buffers during a pause");
240 BufferNode* nd = get_completed_buffer(stop_at);
241 if (nd == NULL) {
242 return false;
243 } else {
244 if (DirtyCardQueue::apply_closure_to_buffer(cl, nd, _sz, true, worker_i)) {
245 // Done with fully processed buffer.
246 deallocate_buffer(nd);
247 Atomic::inc(&_processed_buffers_rs_thread);
248 return true;
249 } else {
250 // Return partially processed buffer to the queue.
251 enqueue_complete_buffer(nd);
252 return false;
253 }
254 }
255 }
256
257 void DirtyCardQueueSet::par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl) {
258 BufferNode* nd = _cur_par_buffer_node;
259 while (nd != NULL) {
260 BufferNode* next = nd->next();
261 void* actual = Atomic::cmpxchg_ptr(next, &_cur_par_buffer_node, nd);
262 if (actual == nd) {
263 bool b = DirtyCardQueue::apply_closure_to_buffer(cl, nd, _sz, false);
264 guarantee(b, "Should not stop early.");
265 nd = next;
266 } else {
267 nd = static_cast<BufferNode*>(actual);
268 }
269 }
270 }
271
272 // Deallocates any completed log buffers
273 void DirtyCardQueueSet::clear() {
274 BufferNode* buffers_to_delete = NULL;
275 {
276 MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
277 while (_completed_buffers_head != NULL) {
278 BufferNode* nd = _completed_buffers_head;
279 _completed_buffers_head = nd->next();
280 nd->set_next(buffers_to_delete);
281 buffers_to_delete = nd;
282 }
283 _n_completed_buffers = 0;
284 _completed_buffers_tail = NULL;
285 DEBUG_ONLY(assert_completed_buffer_list_len_correct_locked());
286 }
287 while (buffers_to_delete != NULL) {
288 BufferNode* nd = buffers_to_delete;
289 buffers_to_delete = nd->next();
290 deallocate_buffer(nd);
291 }
292
293 }
294
295 void DirtyCardQueueSet::abandon_logs() {
296 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
297 clear();
298 // Since abandon is done only at safepoints, we can safely manipulate
299 // these queues.
300 for (JavaThread* t = Threads::first(); t; t = t->next()) {
301 t->dirty_card_queue().reset();
302 }
303 shared_dirty_card_queue()->reset();
304 }
305
306 void DirtyCardQueueSet::concatenate_log(DirtyCardQueue& dcq) {
307 if (!dcq.is_empty()) {
308 enqueue_complete_buffer(
309 BufferNode::make_node_from_buffer(dcq.get_buf(), dcq.get_index()));
310 dcq.reinitialize();
311 }
312 }
313
314 void DirtyCardQueueSet::concatenate_logs() {
315 // Iterate over all the threads, if we find a partial log add it to
316 // the global list of logs. Temporarily turn off the limit on the number
317 // of outstanding buffers.
318 int save_max_completed_queue = _max_completed_queue;
319 _max_completed_queue = max_jint;
320 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
321 for (JavaThread* t = Threads::first(); t; t = t->next()) {
322 concatenate_log(t->dirty_card_queue());
323 }
324 concatenate_log(_shared_dirty_card_queue);
325 // Restore the completed buffer queue limit.
326 _max_completed_queue = save_max_completed_queue;
327 }
|