211 assert_at_safepoint();
212 HeadTail result(Atomic::load(&_head), Atomic::load(&_tail));
213 Atomic::store(&_head, (BufferNode*)NULL);
214 Atomic::store(&_tail, (BufferNode*)NULL);
215 return result;
216 }
217
// Add a completed (filled) buffer to the global queue and notify the
// primary refinement thread if the pending-card count has risen above
// the processing threshold.  buffer_size() - index() is the number of
// entries in the buffer (see verify_num_cards).
218 void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
219 assert(cbn != NULL, "precondition");
220 // Increment _num_cards before adding to queue, so queue removal doesn't
221 // need to deal with _num_cards possibly going negative.
222 size_t new_num_cards = Atomic::add(&_num_cards, buffer_size() - cbn->index());
223 _completed.push(*cbn);
// Use the value returned by the add for the threshold test, so a
// concurrent update cannot hide the crossing from this thread.
224 if ((new_num_cards > process_cards_threshold()) &&
225 (_primary_refinement_thread != NULL)) {
226 _primary_refinement_thread->activate();
227 }
228 }
229
// Remove and return one completed buffer for processing, or NULL when the
// pending-card count does not exceed stop_at.  Buffers paused before a
// previous safepoint are first made available again.
230 BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
231 enqueue_previous_paused_buffers();
232
233 // Check for insufficient cards to satisfy request. We only do this once,
234 // up front, rather than on each iteration below, since the test is racy
235 // regardless of when we do it.
236 if (Atomic::load_acquire(&_num_cards) <= stop_at) {
237 return NULL;
238 }
239
// The pop may still return NULL despite the count check, since that check
// is racy with concurrent dequeues.
240 BufferNode* result = _completed.pop();
241 if (result != NULL) {
// _num_cards was incremented before the buffer was pushed (see
// enqueue_completed_buffer), so this subtraction cannot underflow.
242 Atomic::sub(&_num_cards, buffer_size() - result->index());
243 }
244 return result;
245 }
246
#ifdef ASSERT
// Debug-only consistency check: walk the completed-buffer queue and verify
// that the per-buffer entry counts sum to the cached _num_cards value.
void G1DirtyCardQueueSet::verify_num_cards() const {
  size_t counted = 0;
  BufferNode* node = _completed.top();
  while (node != NULL) {
    counted += buffer_size() - node->index();
    node = node->next();
  }
  assert(counted == Atomic::load(&_num_cards),
         "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
         Atomic::load(&_num_cards), counted);
}
#endif // ASSERT
259
// A new PausedList starts empty and is tagged with the current safepoint
// id -- presumably what is_next() consults to distinguish a list for the
// upcoming safepoint from one left over from an earlier safepoint; confirm
// against PausedList::is_next().
260 G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() :
261 _head(NULL), _tail(NULL),
262 _safepoint_id(SafepointSynchronize::safepoint_id())
263 {}
281 if (old_head == NULL) {
282 assert(_tail == NULL, "invariant");
283 _tail = node;
284 } else {
285 node->set_next(old_head);
286 }
287 }
288
// Detach the whole list, returning it as a (head, tail) pair and leaving
// the list empty.  _head is accessed atomically because concurrent add()s
// push at the head; _tail is accessed plainly.
// NOTE(review): assumes the caller has exclusive take() rights (see the
// PausedBuffers::take_previous / take_all callers) -- confirm.
289 G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() {
290 BufferNode* head = Atomic::load(&_head);
291 BufferNode* tail = _tail;
292 Atomic::store(&_head, (BufferNode*)NULL);
293 _tail = NULL;
294 return HeadTail(head, tail);
295 }
296
// PausedBuffers starts with no PausedList installed.
297 G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(NULL) {}
298
299 #ifdef ASSERT
// Debug-only destructor: the set must only be destroyed when no paused
// buffers remain.
300 G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() {
301 assert(is_empty(), "invariant");
302 }
303 #endif // ASSERT
304
305 bool G1DirtyCardQueueSet::PausedBuffers::is_empty() const {
306 return Atomic::load(&_plist) == NULL;
307 }
308
// Record node on the list of buffers paused for the next safepoint,
// installing a fresh list first if none is present.  May run concurrently
// with other add() calls; the cmpxchg arbitrates racing installs.
309 void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* node) {
310 assert_not_at_safepoint();
311 PausedList* plist = Atomic::load_acquire(&_plist);
312 if (plist != NULL) {
313 // Already have a next list, so use it. We know it's a next list because
314 // of the precondition that take_previous() has already been called.
315 assert(plist->is_next(), "invariant");
316 } else {
317 // Try to install a new next list.
318 plist = new PausedList();
319 PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)NULL, plist);
320 if (old_plist != NULL) {
321 // Some other thread installed a new next list. Use it instead.
322 delete plist;
323 plist = old_plist;
324 }
325 }
326 plist->add(node);
327 }
328
// Take the buffers paused before a previous safepoint, if any.  Returns an
// empty HeadTail when there is nothing to take, when the installed list is
// for the upcoming ("next") safepoint, or when another thread claimed the
// list first.
329 G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous() {
330 assert_not_at_safepoint();
331 PausedList* previous;
332 {
333 // Deal with plist in a critical section, to prevent it from being
334 // deleted out from under us by a concurrent take_previous().
335 GlobalCounter::CriticalSection cs(Thread::current());
336 previous = Atomic::load_acquire(&_plist);
337 if ((previous == NULL) || // Nothing to take.
338 previous->is_next() || // Not from a previous safepoint.
339 // Some other thread stole it.
340 (Atomic::cmpxchg(&_plist, previous, (PausedList*)NULL) != previous)) {
341 return HeadTail();
342 }
343 }
344 // We now own previous.
345 HeadTail result = previous->take();
// Wait for any thread still inside the critical section above (in a
// concurrent take_previous()) to leave before deleting the list it may
// still be examining.
349 GlobalCounter::write_synchronize();
350 delete previous;
351 return result;
352 }
353
354 G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() {
355 assert_at_safepoint();
356 HeadTail result;
357 PausedList* plist = Atomic::load(&_plist);
358 if (plist != NULL) {
359 Atomic::store(&_plist, (PausedList*)NULL);
360 result = plist->take();
361 delete plist;
362 }
363 return result;
364 }
365
// Stash a buffer to be re-enqueued after the coming safepoint, instead of
// making it immediately available for refinement.
366 void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
367 assert_not_at_safepoint();
368 assert(node->next() == NULL, "precondition");
369 // Cards for paused buffers are included in count, to contribute to
370 // notification checking after the coming safepoint if it doesn't GC.
371 // Note that this means the queue's _num_cards differs from the number
372 // of cards in the queued buffers when there are paused buffers.
373 Atomic::add(&_num_cards, buffer_size() - node->index());
374 _paused.add(node);
375 }
376
377 void G1DirtyCardQueueSet::enqueue_paused_buffers_aux(const HeadTail& paused) {
378 if (paused._head != NULL) {
379 assert(paused._tail != NULL, "invariant");
380 // Cards from paused buffers are already recorded in the queue count.
381 _completed.append(*paused._head, *paused._tail);
382 }
383 }
384
// Transfer buffers paused before a previous safepoint to the completed
// queue.  The is_empty() fast path avoids take_previous()'s critical
// section in the common no-paused-buffers case.
385 void G1DirtyCardQueueSet::enqueue_previous_paused_buffers() {
386 assert_not_at_safepoint();
387 // The fast-path still satisfies the precondition for record_paused_buffer
388 // and PausedBuffers::add, even with a racy test. If there are paused
389 // buffers from a previous safepoint, is_empty() will return false; there
390 // will have been a safepoint between recording and test, so there can't be
391 // a false negative (is_empty() returns true) while such buffers are present.
392 // If is_empty() is false, there are two cases:
393 //
394 // (1) There were paused buffers from a previous safepoint. A concurrent
395 // caller may take and enqueue them first, but that's okay; the precondition
396 // for a possible later record_paused_buffer by this thread will still hold.
397 //
398 // (2) There are paused buffers for a requested next safepoint.
399 //
400 // In each of those cases some effort may be spent detecting and dealing
401 // with those circumstances; any wasted effort in such cases is expected to
402 // be well compensated by the fast path.
403 if (!_paused.is_empty()) {
404 enqueue_paused_buffers_aux(_paused.take_previous());
405 }
406 }
407
// Move all paused buffers to the completed queue; safepoint-only, so both
// previous and next paused lists are drained (via take_all).
408 void G1DirtyCardQueueSet::enqueue_all_paused_buffers() {
409 assert_at_safepoint();
410 enqueue_paused_buffers_aux(_paused.take_all());
411 }
412
413 void G1DirtyCardQueueSet::abandon_completed_buffers() {
414 enqueue_all_paused_buffers();
415 verify_num_cards();
416 G1BufferNodeList list = take_all_completed_buffers();
417 BufferNode* buffers_to_delete = list._head;
418 while (buffers_to_delete != NULL) {
419 BufferNode* bn = buffers_to_delete;
420 buffers_to_delete = bn->next();
421 bn->set_next(NULL);
422 deallocate_buffer(bn);
423 }
424 }
425
|
211 assert_at_safepoint();
212 HeadTail result(Atomic::load(&_head), Atomic::load(&_tail));
213 Atomic::store(&_head, (BufferNode*)NULL);
214 Atomic::store(&_tail, (BufferNode*)NULL);
215 return result;
216 }
217
// Add a completed (filled) buffer to the global queue and notify the
// primary refinement thread if the pending-card count has risen above
// the processing threshold.  buffer_size() - index() is the number of
// entries in the buffer (see verify_num_cards).
218 void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
219 assert(cbn != NULL, "precondition");
220 // Increment _num_cards before adding to queue, so queue removal doesn't
221 // need to deal with _num_cards possibly going negative.
222 size_t new_num_cards = Atomic::add(&_num_cards, buffer_size() - cbn->index());
223 _completed.push(*cbn);
// Use the value returned by the add for the threshold test, so a
// concurrent update cannot hide the crossing from this thread.
224 if ((new_num_cards > process_cards_threshold()) &&
225 (_primary_refinement_thread != NULL)) {
226 _primary_refinement_thread->activate();
227 }
228 }
229
// Remove and return one completed buffer for processing, or NULL when the
// pending-card count is below stop_at.  The count check is racy; some of
// the counted cards may be in paused buffers, so a failed pop retries once
// after flushing buffers paused before a previous safepoint.
230 BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
231 if (Atomic::load_acquire(&_num_cards) < stop_at) {
232 return NULL;
233 }
234
235 BufferNode* result = _completed.pop();
236 if (result == NULL) { // Unlikely if no paused buffers.
237 enqueue_previous_paused_buffers();
238 result = _completed.pop();
239 if (result == NULL) return NULL;
240 }
// _num_cards was incremented before the buffer became available (see
// enqueue_completed_buffer / record_paused_buffer), so no underflow here.
241 Atomic::sub(&_num_cards, buffer_size() - result->index());
242 return result;
243 }
244
#ifdef ASSERT
// Debug-only consistency check: walk the completed-buffer queue and verify
// that the per-buffer entry counts sum to the cached _num_cards value.
void G1DirtyCardQueueSet::verify_num_cards() const {
  size_t counted = 0;
  BufferNode* node = _completed.top();
  while (node != NULL) {
    counted += buffer_size() - node->index();
    node = node->next();
  }
  assert(counted == Atomic::load(&_num_cards),
         "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
         Atomic::load(&_num_cards), counted);
}
#endif // ASSERT
257
// A new PausedList starts empty and is tagged with the current safepoint
// id -- presumably what is_next() consults to distinguish a list for the
// upcoming safepoint from one left over from an earlier safepoint; confirm
// against PausedList::is_next().
258 G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() :
259 _head(NULL), _tail(NULL),
260 _safepoint_id(SafepointSynchronize::safepoint_id())
261 {}
279 if (old_head == NULL) {
280 assert(_tail == NULL, "invariant");
281 _tail = node;
282 } else {
283 node->set_next(old_head);
284 }
285 }
286
// Detach the whole list, returning it as a (head, tail) pair and leaving
// the list empty.  _head is accessed atomically because concurrent add()s
// push at the head; _tail is accessed plainly.
// NOTE(review): assumes the caller has exclusive take() rights (see the
// PausedBuffers::take_previous / take_all callers) -- confirm.
287 G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() {
288 BufferNode* head = Atomic::load(&_head);
289 BufferNode* tail = _tail;
290 Atomic::store(&_head, (BufferNode*)NULL);
291 _tail = NULL;
292 return HeadTail(head, tail);
293 }
294
// PausedBuffers starts with no PausedList installed.
295 G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(NULL) {}
296
297 #ifdef ASSERT
// Debug-only destructor: no PausedList may remain installed when the set
// is destroyed.
298 G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() {
299 assert(Atomic::load(&_plist) == NULL, "invariant");
300 }
301 #endif // ASSERT
302
// Record node on the list of buffers paused for the next safepoint,
// installing a fresh list first if none is present.  May run concurrently
// with other add() calls; the cmpxchg arbitrates racing installs.
303 void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* node) {
304 assert_not_at_safepoint();
305 PausedList* plist = Atomic::load_acquire(&_plist);
306 if (plist == NULL) {
307 // Try to install a new next list.
308 plist = new PausedList();
309 PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)NULL, plist);
310 if (old_plist != NULL) {
311 // Some other thread installed a new next list. Use it instead.
312 delete plist;
313 plist = old_plist;
314 }
315 }
// Any installed list must be the next-safepoint list here; buffers paused
// before a previous safepoint are flushed before recording (see
// record_paused_buffer).
316 assert(plist->is_next(), "invariant");
317 plist->add(node);
318 }
319
// Take the buffers paused before a previous safepoint, if any.  Returns an
// empty HeadTail when there is nothing to take, when the installed list is
// for the upcoming ("next") safepoint, or when another thread claimed the
// list first.
320 G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous() {
321 assert_not_at_safepoint();
322 PausedList* previous;
323 {
324 // Deal with plist in a critical section, to prevent it from being
325 // deleted out from under us by a concurrent take_previous().
326 GlobalCounter::CriticalSection cs(Thread::current());
327 previous = Atomic::load_acquire(&_plist);
328 if ((previous == NULL) || // Nothing to take.
329 previous->is_next() || // Not from a previous safepoint.
330 // Some other thread stole it.
331 (Atomic::cmpxchg(&_plist, previous, (PausedList*)NULL) != previous)) {
332 return HeadTail();
333 }
334 }
335 // We now own previous.
336 HeadTail result = previous->take();
// Wait for any thread still inside the critical section above (in a
// concurrent take_previous()) to leave before deleting the list it may
// still be examining.
340 GlobalCounter::write_synchronize();
341 delete previous;
342 return result;
343 }
344
345 G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() {
346 assert_at_safepoint();
347 HeadTail result;
348 PausedList* plist = Atomic::load(&_plist);
349 if (plist != NULL) {
350 Atomic::store(&_plist, (PausedList*)NULL);
351 result = plist->take();
352 delete plist;
353 }
354 return result;
355 }
356
// Stash a buffer to be re-enqueued after the coming safepoint, instead of
// making it immediately available for refinement.  Flushes stale paused
// buffers first, establishing the invariant PausedBuffers::add asserts.
357 void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
358 assert_not_at_safepoint();
359 assert(node->next() == NULL, "precondition");
360 // Ensure there aren't any paused buffers from a previous safepoint.
361 enqueue_previous_paused_buffers();
362 // Cards for paused buffers are included in count, to contribute to
363 // notification checking after the coming safepoint if it doesn't GC.
364 // Note that this means the queue's _num_cards differs from the number
365 // of cards in the queued buffers when there are paused buffers.
366 Atomic::add(&_num_cards, buffer_size() - node->index());
367 _paused.add(node);
368 }
369
370 void G1DirtyCardQueueSet::enqueue_paused_buffers_aux(const HeadTail& paused) {
371 if (paused._head != NULL) {
372 assert(paused._tail != NULL, "invariant");
373 // Cards from paused buffers are already recorded in the queue count.
374 _completed.append(*paused._head, *paused._tail);
375 }
376 }
377
// Transfer buffers paused before a previous safepoint (if any) to the
// completed queue, making them available for refinement again.
378 void G1DirtyCardQueueSet::enqueue_previous_paused_buffers() {
379 assert_not_at_safepoint();
380 enqueue_paused_buffers_aux(_paused.take_previous());
381 }
382
// Move all paused buffers to the completed queue; safepoint-only, so both
// previous and next paused lists are drained (via take_all).
383 void G1DirtyCardQueueSet::enqueue_all_paused_buffers() {
384 assert_at_safepoint();
385 enqueue_paused_buffers_aux(_paused.take_all());
386 }
387
388 void G1DirtyCardQueueSet::abandon_completed_buffers() {
389 enqueue_all_paused_buffers();
390 verify_num_cards();
391 G1BufferNodeList list = take_all_completed_buffers();
392 BufferNode* buffers_to_delete = list._head;
393 while (buffers_to_delete != NULL) {
394 BufferNode* bn = buffers_to_delete;
395 buffers_to_delete = bn->next();
396 bn->set_next(NULL);
397 deallocate_buffer(bn);
398 }
399 }
400
|