/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BufferNodeList.hpp"
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefineThread.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1FreeIdSet.hpp"
#include "gc/g1/g1RedirtyCardsQueue.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "memory/iterator.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/macros.hpp"
#include "utilities/quickSort.hpp"

G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
  // Dirty card queues are always active, so we create them with their
  // active field set to true.
  PtrQueue(qset, true /* active */)
{ }

G1DirtyCardQueue::~G1DirtyCardQueue() {
  flush();
}

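// Called when the queue's current buffer is full.  Hands the buffer to the
// qset, which either processes it immediately (mutator refinement) or
// enqueues it for concurrent refinement; the queue then resets or replaces
// its buffer accordingly.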
void G1DirtyCardQueue::handle_completed_buffer() {
  assert(_buf != NULL, "precondition");
  BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
  G1DirtyCardQueueSet* dcqs = dirty_card_qset();
  if (dcqs->process_or_enqueue_completed_buffer(node)) {
    reset();                    // Buffer fully processed, reset index.
  } else {
    allocate_buffer();          // Buffer enqueued, get a new one.
  }
}

// Assumed to be zero by concurrent threads.
static uint par_ids_start() { return 0; }

G1DirtyCardQueueSet::G1DirtyCardQueueSet(BufferNode::Allocator* allocator) :
  PtrQueueSet(allocator),
  _primary_refinement_thread(NULL),
  _num_cards(0),
  _completed(),
  _paused(),
  _free_ids(par_ids_start(), num_par_ids()),
  _process_cards_threshold(ProcessCardsThresholdNever),
  _max_cards(MaxCardsUnlimited),
  _max_cards_padding(0),
  _mutator_refined_cards_counters(NEW_C_HEAP_ARRAY(size_t, num_par_ids(), mtGC))
{
  ::memset(_mutator_refined_cards_counters, 0, num_par_ids() * sizeof(size_t));
  _all_active = true;
}

G1DirtyCardQueueSet::~G1DirtyCardQueueSet() {
  abandon_completed_buffers();
  FREE_C_HEAP_ARRAY(size_t, _mutator_refined_cards_counters);
}

// Determines how many mutator threads can process the buffers in parallel.
uint G1DirtyCardQueueSet::num_par_ids() {
  return (uint)os::initial_active_processor_count();
}

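// Sum the per-worker counts of cards refined by mutator threads.  The
// counters are updated without synchronization, so the result is only
// approximate if mutators are refining concurrently with this call.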
size_t G1DirtyCardQueueSet::total_mutator_refined_cards() const {
  size_t sum = 0;
  for (uint i = 0; i < num_par_ids(); ++i) {
    sum += _mutator_refined_cards_counters[i];
  }
  return sum;
}

void G1DirtyCardQueueSet::handle_zero_index_for_thread(Thread* t) {
  G1ThreadLocalData::dirty_card_queue(t).handle_zero_index();
}

#ifdef ASSERT
G1DirtyCardQueueSet::Queue::~Queue() {
  assert(_head == NULL, "precondition");
  assert(_tail == NULL, "precondition");
}
#endif // ASSERT

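// Return the current head of the queue.  The result may immediately become
// stale in the presence of concurrent pops; here it is only used to start
// iteration during verification.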
BufferNode* G1DirtyCardQueueSet::Queue::top() const {
  return Atomic::load(&_head);
}

// An append operation atomically exchanges the new tail with the queue tail.
// It then sets the "next" value of the old tail to the head of the list being
// appended; it is an invariant that the old tail's "next" value is NULL.
// But if the old tail is NULL then the queue was empty.  In this case the
// head of the list being appended is instead stored in the queue head; it is
// an invariant that the queue head is NULL in this case.
//
// This means there is a period between the exchange and the old tail update
// where the queue sequence is split into two parts, the list from the queue
// head to the old tail, and the list being appended.  If there are concurrent
// push/append operations, each may introduce another such segment.  But they
// all eventually get resolved by their respective updates of their old tail's
// "next" value.  This also means that pop operations must handle a buffer
// with a NULL "next" value specially.
//
// A push operation is just a degenerate append, where the buffer being pushed
// is both the head and the tail of the list being appended.
void G1DirtyCardQueueSet::Queue::append(BufferNode& first, BufferNode& last) {
  assert(last.next() == NULL, "precondition");
  BufferNode* old_tail = Atomic::xchg(&_tail, &last);
  if (old_tail == NULL) {       // Was empty.
    Atomic::store(&_head, &first);
  } else {
    assert(old_tail->next() == NULL, "invariant");
    old_tail->set_next(&first);
  }
}

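// Pop and return the head of the queue, or NULL if the queue is empty.
// May also return NULL when the queue is not empty, if an in-progress
// concurrent push/append/pop prevents safely taking the head; see the
// comments in the body for details.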
BufferNode* G1DirtyCardQueueSet::Queue::pop() {
  Thread* current_thread = Thread::current();
  while (true) {
    // Use a critical section per iteration, rather than over the whole
    // operation.  We're not guaranteed to make progress.  Lingering in one
    // CS could lead to excessive allocation of buffers, because the CS
    // blocks return of released buffers to the free list for reuse.
    GlobalCounter::CriticalSection cs(current_thread);

    BufferNode* result = Atomic::load_acquire(&_head);
    if (result == NULL) return NULL; // Queue is empty.

    BufferNode* next = Atomic::load_acquire(BufferNode::next_ptr(*result));
    if (next != NULL) {
      // The "usual" lock-free pop from the head of a singly linked list.
      if (result == Atomic::cmpxchg(&_head, result, next)) {
        // Former head successfully taken; it is not the last.
        assert(Atomic::load(&_tail) != result, "invariant");
        assert(result->next() != NULL, "invariant");
        result->set_next(NULL);
        return result;
      }
      // Lost the race; try again.
      continue;
    }

    // next is NULL.  This case is handled differently from the "usual"
    // lock-free pop from the head of a singly linked list.

    // If _tail == result then result is the only element in the list. We can
    // remove it from the list by first setting _tail to NULL and then setting
    // _head to NULL, the order being important.  We set _tail with cmpxchg in
    // case of a concurrent push/append/pop also changing _tail.  If we win
    // then we've claimed result.
    if (Atomic::cmpxchg(&_tail, result, (BufferNode*)NULL) == result) {
      assert(result->next() == NULL, "invariant");
      // Now that we've claimed result, also set _head to NULL.  But we must
      // be careful of a concurrent push/append after we NULLed _tail, since
      // it may have already performed its list-was-empty update of _head,
      // which we must not overwrite.
      Atomic::cmpxchg(&_head, result, (BufferNode*)NULL);
      return result;
    }

    // If _head != result then we lost the race to take result; try again.
    if (result != Atomic::load_acquire(&_head)) {
      continue;
    }

    // An in-progress concurrent operation interfered with taking the head
    // element when it was the only element.  A concurrent pop may have won
    // the race to clear the tail but not yet cleared the head. Alternatively,
    // a concurrent push/append may have changed the tail but not yet linked
    // result->next().  We cannot take result in either case.  We don't just
    // try again, because we could spin for a long time waiting for that
    // concurrent operation to finish.  In the first case, returning NULL is
    // fine; we lost the race for the only element to another thread.  We
    // also return NULL for the second case, and let the caller cope.
    return NULL;
  }
}

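// Take the entire list, leaving the queue empty.  Only used within a
// safepoint, so no concurrent push/append/pop operations are possible.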
G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::Queue::take_all() {
  assert_at_safepoint();
  HeadTail result(Atomic::load(&_head), Atomic::load(&_tail));
  Atomic::store(&_head, (BufferNode*)NULL);
  Atomic::store(&_tail, (BufferNode*)NULL);
  return result;
}

void G1DirtyCardQueueSet::enqueue_completed_buffer(BufferNode* cbn) {
  assert(cbn != NULL, "precondition");
  // Increment _num_cards before adding to queue, so queue removal doesn't
  // need to deal with _num_cards possibly going negative.
  size_t new_num_cards = Atomic::add(&_num_cards, buffer_size() - cbn->index());
  _completed.push(*cbn);
  if ((new_num_cards > process_cards_threshold()) &&
      (_primary_refinement_thread != NULL)) {
    _primary_refinement_thread->activate();
  }
}

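// Get a completed buffer for processing, or NULL if the number of pending
// cards is below stop_at.  If the completed queue appears empty, any
// buffers paused at a previous safepoint are enqueued and the pop retried.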
BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
  if (Atomic::load_acquire(&_num_cards) < stop_at) {
    return NULL;
  }

  BufferNode* result = _completed.pop();
  if (result == NULL) {         // Unlikely if no paused buffers.
    enqueue_previous_paused_buffers();
    result = _completed.pop();
    if (result == NULL) return NULL;
  }
  Atomic::sub(&_num_cards, buffer_size() - result->index());
  return result;
}

#ifdef ASSERT
void G1DirtyCardQueueSet::verify_num_cards() const {
  size_t actual = 0;
  BufferNode* cur = _completed.top();
  for ( ; cur != NULL; cur = cur->next()) {
    actual += buffer_size() - cur->index();
  }
  assert(actual == Atomic::load(&_num_cards),
         "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
         Atomic::load(&_num_cards), actual);
}
#endif // ASSERT

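// A PausedList collects buffers whose processing was interrupted by a
// safepoint request.  It records the safepoint id current at construction,
// so callers can distinguish the list for the coming ("next") safepoint
// from a list left over from a previous safepoint.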
G1DirtyCardQueueSet::PausedBuffers::PausedList::PausedList() :
  _head(NULL), _tail(NULL),
  _safepoint_id(SafepointSynchronize::safepoint_id())
{}

#ifdef ASSERT
G1DirtyCardQueueSet::PausedBuffers::PausedList::~PausedList() {
  assert(Atomic::load(&_head) == NULL, "precondition");
  assert(_tail == NULL, "precondition");
}
#endif // ASSERT

bool G1DirtyCardQueueSet::PausedBuffers::PausedList::is_next() const {
  assert_not_at_safepoint();
  return _safepoint_id == SafepointSynchronize::safepoint_id();
}

void G1DirtyCardQueueSet::PausedBuffers::PausedList::add(BufferNode* node) {
  assert_not_at_safepoint();
  assert(is_next(), "precondition");
  BufferNode* old_head = Atomic::xchg(&_head, node);
  if (old_head == NULL) {
    assert(_tail == NULL, "invariant");
    _tail = node;
  } else {
    node->set_next(old_head);
  }
}

G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::PausedList::take() {
  BufferNode* head = Atomic::load(&_head);
  BufferNode* tail = _tail;
  Atomic::store(&_head, (BufferNode*)NULL);
  _tail = NULL;
  return HeadTail(head, tail);
}

G1DirtyCardQueueSet::PausedBuffers::PausedBuffers() : _plist(NULL) {}

#ifdef ASSERT
G1DirtyCardQueueSet::PausedBuffers::~PausedBuffers() {
  assert(Atomic::load(&_plist) == NULL, "invariant");
}
#endif // ASSERT

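// Add a paused buffer to the list for the coming safepoint, installing a
// new list first if no thread has installed one since the last safepoint.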
void G1DirtyCardQueueSet::PausedBuffers::add(BufferNode* node) {
  assert_not_at_safepoint();
  PausedList* plist = Atomic::load_acquire(&_plist);
  if (plist == NULL) {
    // Try to install a new next list.
    plist = new PausedList();
    PausedList* old_plist = Atomic::cmpxchg(&_plist, (PausedList*)NULL, plist);
    if (old_plist != NULL) {
      // Some other thread installed a new next list.  Use it instead.
      delete plist;
      plist = old_plist;
    }
  }
  assert(plist->is_next(), "invariant");
  plist->add(node);
}

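// Take the buffers paused at a previous safepoint, if any.  Returns an
// empty HeadTail if there is no paused list, if the installed list is for
// the coming safepoint, or if another thread claimed the list first.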
G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_previous() {
  assert_not_at_safepoint();
  PausedList* previous;
  {
    // Deal with plist in a critical section, to prevent it from being
    // deleted out from under us by a concurrent take_previous().
    GlobalCounter::CriticalSection cs(Thread::current());
    previous = Atomic::load_acquire(&_plist);
    if ((previous == NULL) ||   // Nothing to take.
        previous->is_next() ||  // Not from a previous safepoint.
        // Some other thread stole it.
        (Atomic::cmpxchg(&_plist, previous, (PausedList*)NULL) != previous)) {
      return HeadTail();
    }
  }
  // We now own previous.
  HeadTail result = previous->take();
  // There might be other threads examining previous (in concurrent
  // take_previous()).  Synchronize to wait until any such threads are
  // done with such examination before deleting.
  GlobalCounter::write_synchronize();
  delete previous;
  return result;
}

G1DirtyCardQueueSet::HeadTail G1DirtyCardQueueSet::PausedBuffers::take_all() {
  assert_at_safepoint();
  HeadTail result;
  PausedList* plist = Atomic::load(&_plist);
  if (plist != NULL) {
    Atomic::store(&_plist, (PausedList*)NULL);
    result = plist->take();
    delete plist;
  }
  return result;
}

void G1DirtyCardQueueSet::record_paused_buffer(BufferNode* node) {
  assert_not_at_safepoint();
  assert(node->next() == NULL, "precondition");
  // Ensure there aren't any paused buffers from a previous safepoint.
  enqueue_previous_paused_buffers();
  // Cards for paused buffers are included in count, to contribute to
  // notification checking after the coming safepoint if it doesn't GC.
  // Note that this means the queue's _num_cards differs from the number
  // of cards in the queued buffers when there are paused buffers.
  Atomic::add(&_num_cards, buffer_size() - node->index());
  _paused.add(node);
}

void G1DirtyCardQueueSet::enqueue_paused_buffers_aux(const HeadTail& paused) {
  if (paused._head != NULL) {
    assert(paused._tail != NULL, "invariant");
    // Cards from paused buffers are already recorded in the queue count.
    _completed.append(*paused._head, *paused._tail);
  }
}

void G1DirtyCardQueueSet::enqueue_previous_paused_buffers() {
  assert_not_at_safepoint();
  enqueue_paused_buffers_aux(_paused.take_previous());
}

void G1DirtyCardQueueSet::enqueue_all_paused_buffers() {
  assert_at_safepoint();
  enqueue_paused_buffers_aux(_paused.take_all());
}

void G1DirtyCardQueueSet::abandon_completed_buffers() {
  enqueue_all_paused_buffers();
  verify_num_cards();
  G1BufferNodeList list = take_all_completed_buffers();
  BufferNode* buffers_to_delete = list._head;
  while (buffers_to_delete != NULL) {
    BufferNode* bn = buffers_to_delete;
    buffers_to_delete = bn->next();
    bn->set_next(NULL);
    deallocate_buffer(bn);
  }
}

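// Activate the primary refinement thread if the number of pending cards
// exceeds the processing threshold.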
void G1DirtyCardQueueSet::notify_if_necessary() {
  if ((_primary_refinement_thread != NULL) &&
      (num_cards() > process_cards_threshold())) {
    _primary_refinement_thread->activate();
  }
}

// Merge lists of buffers. The source queue set is emptied as a
// result. The queue sets must share the same allocator.
void G1DirtyCardQueueSet::merge_bufferlists(G1RedirtyCardsQueueSet* src) {
  assert(allocator() == src->allocator(), "precondition");
  const G1BufferNodeList from = src->take_all_completed_buffers();
  if (from._head != NULL) {
    Atomic::add(&_num_cards, from._entry_count);
    _completed.append(*from._head, *from._tail);
  }
}

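// Remove all completed buffers, including any paused buffers, returning
// them together with the total card count, and reset the card count to
// zero.  Only used within a safepoint.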
G1BufferNodeList G1DirtyCardQueueSet::take_all_completed_buffers() {
  enqueue_all_paused_buffers();
  verify_num_cards();
  HeadTail buffers = _completed.take_all();
  size_t num_cards = Atomic::load(&_num_cards);
  Atomic::store(&_num_cards, size_t(0));
  return G1BufferNodeList(buffers._head, buffers._tail, num_cards);
}

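// Helper for refining the cards in a buffer.  Refinement proceeds in three
// phases: clean the cards, discarding those that don't need processing;
// sort the remaining cards in decreasing address order; then refine them,
// stopping early if a safepoint is requested.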
class G1RefineBufferedCards : public StackObj {
  BufferNode* const _node;
  CardTable::CardValue** const _node_buffer;
  const size_t _node_buffer_size;
  const uint _worker_id;
  size_t* _total_refined_cards;
  G1RemSet* const _g1rs;

  static inline int compare_card(const CardTable::CardValue* p1,
                                 const CardTable::CardValue* p2) {
    return p2 - p1;
  }

  // Sorts the cards from start_index to _node_buffer_size in *decreasing*
  // address order. Tests showed that this order is preferable to not sorting
  // or increasing address order.
  void sort_cards(size_t start_index) {
    QuickSort::sort(&_node_buffer[start_index],
                    _node_buffer_size - start_index,
                    compare_card,
                    false);
  }

  // Returns the index to the first clean card in the buffer.
  size_t clean_cards() {
    const size_t start = _node->index();
    assert(start <= _node_buffer_size, "invariant");

    // Two-fingered compaction algorithm similar to the filtering mechanism in
    // SATBMarkQueue. The main difference is that clean_card_before_refine()
    // could change the buffer element in-place.
    // We don't check for SuspendibleThreadSet::should_yield(), because
    // cleaning and redirtying the cards is fast.
    CardTable::CardValue** src = &_node_buffer[start];
    CardTable::CardValue** dst = &_node_buffer[_node_buffer_size];
    assert(src <= dst, "invariant");
    for ( ; src < dst; ++src) {
      // Search low to high for a card to keep.
      if (_g1rs->clean_card_before_refine(src)) {
        // Found keeper.  Search high to low for a card to discard.
        while (src < --dst) {
          if (!_g1rs->clean_card_before_refine(dst)) {
            *dst = *src;         // Replace discard with keeper.
            break;
          }
        }
        // If discard search failed (src == dst), the outer loop will also end.
      }
    }

    // dst points to the first retained clean card, or the end of the buffer
    // if all the cards were discarded.
    const size_t first_clean = dst - _node_buffer;
    assert(first_clean >= start && first_clean <= _node_buffer_size, "invariant");
    // Discarded cards are considered as refined.
    *_total_refined_cards += first_clean - start;
    return first_clean;
  }

  bool refine_cleaned_cards(size_t start_index) {
    bool result = true;
    size_t i = start_index;
    for ( ; i < _node_buffer_size; ++i) {
      if (SuspendibleThreadSet::should_yield()) {
        redirty_unrefined_cards(i);
        result = false;
        break;
      }
      _g1rs->refine_card_concurrently(_node_buffer[i], _worker_id);
    }
    _node->set_index(i);
    *_total_refined_cards += i - start_index;
    return result;
  }

  void redirty_unrefined_cards(size_t start) {
    for ( ; start < _node_buffer_size; ++start) {
      *_node_buffer[start] = G1CardTable::dirty_card_val();
    }
  }

public:
  G1RefineBufferedCards(BufferNode* node,
                        size_t node_buffer_size,
                        uint worker_id,
                        size_t* total_refined_cards) :
    _node(node),
    _node_buffer(reinterpret_cast<CardTable::CardValue**>(BufferNode::make_buffer_from_node(node))),
    _node_buffer_size(node_buffer_size),
    _worker_id(worker_id),
    _total_refined_cards(total_refined_cards),
    _g1rs(G1CollectedHeap::heap()->rem_set()) {}

  bool refine() {
    size_t first_clean_index = clean_cards();
    if (first_clean_index == _node_buffer_size) {
      _node->set_index(first_clean_index);
      return true;
    }
    // This fence serves two purposes. First, the cards must be cleaned
    // before processing the contents. Second, we can't proceed with
    // processing a region until after the read of the region's top in
    // collect_and_clean_cards(), for synchronization with possibly concurrent
    // humongous object allocation (see comment at the StoreStore fence before
    // setting the regions' tops in humongous allocation path).
    // It's okay that reading the region's top and reading the region's type
    // are racy with respect to each other. We need both set, in any order,
    // to proceed.
    OrderAccess::fence();
    sort_cards(first_clean_index);
    return refine_cleaned_cards(first_clean_index);
  }
};

bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
                                        uint worker_id,
                                        size_t* total_refined_cards) {
  G1RefineBufferedCards buffered_cards(node,
                                       buffer_size(),
                                       worker_id,
                                       total_refined_cards);
  return buffered_cards.refine();
}

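// Verify that a buffer reported as fully refined was consumed to the end,
// i.e. its index has reached the buffer size.  Expands to nothing in
// product builds.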
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size)                \
  do {                                                          \
    size_t _afc_index = (node)->index();                        \
    size_t _afc_size = (buffer_size);                           \
    assert(_afc_index == _afc_size,                             \
           "Buffer was not fully consumed as claimed: index: "  \
           SIZE_FORMAT ", size: " SIZE_FORMAT,                  \
            _afc_index, _afc_size);                             \
  } while (0)
#endif // ASSERT

bool G1DirtyCardQueueSet::process_or_enqueue_completed_buffer(BufferNode* node) {
  if (Thread::current()->is_Java_thread()) {
    // If the number of pending cards exceeds the limit, make this Java
    // thread do the processing itself.  The calculation is racy, but we
    // don't need precision here.  The add of padding could overflow,
    // which is treated as unlimited.
    size_t limit = max_cards() + max_cards_padding();
    if ((num_cards() > limit) && (limit >= max_cards())) {
      if (mut_process_buffer(node)) {
        return true;
      }
      // Buffer was incompletely processed because of a pending safepoint
      // request.  Unlike with refinement thread processing, for mutator
      // processing the buffer did not come from the completed buffer queue,
      // so it is okay to add it to the queue rather than to the paused set.
      // Indeed, it can't be added to the paused set because we didn't pass
      // through enqueue_previous_paused_buffers.
    }
  }
  enqueue_completed_buffer(node);
  return false;
}

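// Refine the buffer using a temporarily claimed mutator par id, crediting
// refined cards to the mutator counters.  Returns true if the buffer was
// fully processed.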
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
  uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id
  uint counter_index = worker_id - par_ids_start();
  size_t* counter = &_mutator_refined_cards_counters[counter_index];
  bool result = refine_buffer(node, worker_id, counter);
  _free_ids.release_par_id(worker_id); // release the id

  if (result) {
    assert_fully_consumed(node, buffer_size());
  }
  return result;
}

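// Take and refine a completed buffer, if the number of pending cards is at
// least stop_at.  Returns false if no buffer was taken.  Otherwise the
// buffer is either fully refined and deallocated, or recorded as paused
// for later completion, and true is returned.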
bool G1DirtyCardQueueSet::refine_completed_buffer_concurrently(uint worker_id,
                                                               size_t stop_at,
                                                               size_t* total_refined_cards) {
  BufferNode* node = get_completed_buffer(stop_at);
  if (node == NULL) {
    return false;
  } else if (refine_buffer(node, worker_id, total_refined_cards)) {
    assert_fully_consumed(node, buffer_size());
    // Done with fully processed buffer.
    deallocate_buffer(node);
    return true;
  } else {
    // Buffer incompletely processed because there is a pending safepoint.
    // Record partially processed buffer, to be finished later.
    record_paused_buffer(node);
    return true;
  }
}

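// Discard all pending dirty card work at a safepoint: abandon the
// completed and paused buffers, and reset each thread's queue and the
// shared queue.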
void G1DirtyCardQueueSet::abandon_logs() {
  assert_at_safepoint();
  abandon_completed_buffers();

  // Since abandon is done only at safepoints, we can safely manipulate
  // these queues.
  struct AbandonThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1ThreadLocalData::dirty_card_queue(t).reset();
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().reset();
}

void G1DirtyCardQueueSet::concatenate_logs() {
  // Iterate over all the threads; if we find a partial log, add it to
  // the global list of logs.  Temporarily turn off the limit on the number
  // of outstanding buffers.
  assert_at_safepoint();
  size_t old_limit = max_cards();
  set_max_cards(MaxCardsUnlimited);

  struct ConcatenateThreadLogClosure : public ThreadClosure {
    virtual void do_thread(Thread* t) {
      G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(t);
      if (!dcq.is_empty()) {
        dcq.flush();
      }
    }
  } closure;
  Threads::threads_do(&closure);

  G1BarrierSet::shared_dirty_card_queue().flush();
  enqueue_all_paused_buffers();
  verify_num_cards();
  set_max_cards(old_limit);
}