src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp
*** 226,333 ****
return result;
}
class G1RefineBufferedCards : public StackObj {
BufferNode* const _node;
! void** const _node_buffer;
const size_t _node_buffer_size;
size_t* _total_refined_cards;
- CardTable::CardValue** const _cards;
G1RemSet* const _g1rs;
static inline int compare_card(const void* p1,
const void* p2) {
! return *(CardTable::CardValue**)p1 - *(CardTable::CardValue**)p2;
}
! void sort_cards(size_t n) {
! qsort(_cards, n, sizeof(CardTable::CardValue*), compare_card);
! }
!
! size_t collect_and_clean_cards() {
! size_t i = _node->index();
! size_t num_collected = 0;
! assert(i <= _node_buffer_size, "invariant");
// We don't check for SuspendibleThreadSet::should_yield(), because
! // collecting, cleaning and abandoning the cards is fast.
! for ( ; i < _node_buffer_size; ++i) {
! CardTable::CardValue* cp = static_cast<CardTable::CardValue*>(_node_buffer[i]);
! if (_g1rs->clean_card_before_refine(cp)) {
! _cards[num_collected] = cp;
! num_collected++;
! } else {
! // Skipped cards are considered as refined.
! *_total_refined_cards += 1;
}
}
! _node->set_index(i);
! return num_collected;
}
! bool refine_collected_cards(uint worker_id, size_t num_collected) {
! while (num_collected > 0) {
if (SuspendibleThreadSet::should_yield()) {
! abandon_cards(num_collected);
return false;
}
! num_collected--;
*_total_refined_cards += 1;
- _g1rs->refine_card_concurrently(_cards[num_collected], worker_id);
}
return true;
}
! void abandon_cards(size_t num_collected) {
! assert(num_collected <= _node_buffer_size, "sanity");
! for (size_t i = 0; i < num_collected; ++i) {
! *_cards[i] = G1CardTable::dirty_card_val();
! }
! size_t buffer_index = _node_buffer_size - num_collected;
! memcpy(_node_buffer + buffer_index, _cards, num_collected * sizeof(CardTable::CardValue*));
! _node->set_index(buffer_index);
}
public:
G1RefineBufferedCards(BufferNode* node,
size_t node_buffer_size,
size_t* total_refined_cards) :
_node(node),
! _node_buffer(BufferNode::make_buffer_from_node(node)),
_node_buffer_size(node_buffer_size),
_total_refined_cards(total_refined_cards),
! _cards(NEW_RESOURCE_ARRAY(CardTable::CardValue*,
! node_buffer_size)),
! _g1rs(G1CollectedHeap::heap()->rem_set()) {}
!
! ~G1RefineBufferedCards() {
! FREE_RESOURCE_ARRAY(CardTable::CardValue*, _cards, _node_buffer_size);
! }
!
! // Refine the cards in the BufferNode "_node" from its index to buffer_size.
! // Stops processing if SuspendibleThreadSet::should_yield() is true.
! // Returns true if the entire buffer was processed, false if there
! // is a pending yield request. The node's index is updated to exclude
! // the processed elements, e.g. up to the element before processing
! // stopped, or one past the last element if the entire buffer was
! // processed. Increments *_total_refined_cards by the number of cards
! // processed and removed from the buffer.
bool refine(uint worker_id) {
! size_t n = collect_and_clean_cards();
// This fence serves two purposes. First, the cards must be cleaned
// before processing the contents. Second, we can't proceed with
// processing a region until after the read of the region's top in
// collect_and_clean_cards(), for synchronization with possibly concurrent
// humongous object allocation (see comment at the StoreStore fence before
// setting the regions' tops in humongous allocation path).
// It's okay that reading region's top and reading region's type were racy
// wrt each other. We need both set, in any order, to proceed.
OrderAccess::fence();
! sort_cards(n);
! return refine_collected_cards(worker_id, n);
}
};
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size) \
do { \
--- 226,334 ----
return result;
}
class G1RefineBufferedCards : public StackObj {
BufferNode* const _node;
! CardTable::CardValue** const _node_buffer;
const size_t _node_buffer_size;
size_t* _total_refined_cards;
G1RemSet* const _g1rs;
+ DEBUG_ONLY(KVHashtable<CardTable::CardValue* COMMA HeapWord* COMMA mtGC> _card_top_map;)
static inline int compare_card(const void* p1,
const void* p2) {
! return *(CardTable::CardValue**)p2 - *(CardTable::CardValue**)p1;
}
! // Sorts the cards from start_index to _node_buffer_size in *decreasing*
! // address order. This order improves the performance of the subsequent
! // refinement pass, which processes the cards starting from start_index.
! void sort_cards(size_t start_index) {
! qsort(_node_buffer + start_index,
! _node_buffer_size - start_index,
! sizeof(CardTable::CardValue*),
! compare_card);
! }
!
! // Returns the index to the first clean card in the buffer.
! size_t clean_cards() {
! const size_t start = _node->index();
! assert(start <= _node_buffer_size, "invariant");
! size_t first_clean = _node_buffer_size;
// We don't check for SuspendibleThreadSet::should_yield(), because
! // cleaning and redirtying the cards is fast.
! for (int i = _node_buffer_size - 1; i >= static_cast<int>(start); --i) {
! CardTable::CardValue* cp = _node_buffer[i];
! if (_g1rs->clean_card_before_refine(cp
! DEBUG_ONLY(COMMA _card_top_map))) {
! first_clean--;
! _node_buffer[first_clean] = cp;
}
}
! assert(first_clean >= start && first_clean <= _node_buffer_size, "invariant");
! // Skipped cards are considered as refined.
! *_total_refined_cards += first_clean - start;
! return first_clean;
}
! bool refine_cleaned_cards(uint worker_id, size_t start_index) {
! for (size_t i = start_index; i < _node_buffer_size; ++i) {
if (SuspendibleThreadSet::should_yield()) {
! redirty_unrefined_cards(i);
! _node->set_index(i);
return false;
}
! _g1rs->refine_card_concurrently(_node_buffer[i], worker_id
! DEBUG_ONLY(COMMA _card_top_map));
*_total_refined_cards += 1;
}
+ _node->set_index(_node_buffer_size);
return true;
}
! void redirty_unrefined_cards(size_t start) {
! for ( ; start < _node_buffer_size; ++start) {
! *_node_buffer[start] = G1CardTable::dirty_card_val();
! }
}
public:
G1RefineBufferedCards(BufferNode* node,
size_t node_buffer_size,
size_t* total_refined_cards) :
_node(node),
! _node_buffer(reinterpret_cast<CardTable::CardValue**>(BufferNode::make_buffer_from_node(node))),
_node_buffer_size(node_buffer_size),
_total_refined_cards(total_refined_cards),
! _g1rs(G1CollectedHeap::heap()->rem_set())
! DEBUG_ONLY(COMMA _card_top_map(node_buffer_size)) {}
!
bool refine(uint worker_id) {
! size_t first_clean_index = clean_cards();
// This fence serves two purposes. First, the cards must be cleaned
// before processing the contents. Second, we can't proceed with
// processing a region until after the read of the region's top in
// clean_cards(), for synchronization with possibly concurrent
// humongous object allocation (see comment at the StoreStore fence before
// setting the regions' tops in humongous allocation path).
// It's okay that reading region's top and reading region's type were racy
// wrt each other. We need both set, in any order, to proceed.
OrderAccess::fence();
! sort_cards(first_clean_index);
! return refine_cleaned_cards(worker_id, first_clean_index);
}
};
+ bool G1DirtyCardQueueSet::refine_buffer(BufferNode* node,
+ uint worker_id,
+ size_t* total_refined_cards) {
+ G1RefineBufferedCards buffered_cards(node,
+ buffer_size(),
+ total_refined_cards);
+ return buffered_cards.refine(worker_id);
+ }
+
#ifndef ASSERT
#define assert_fully_consumed(node, buffer_size)
#else
#define assert_fully_consumed(node, buffer_size) \
do { \
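
Note on the new compare_card(): qsort's comparator must return an int, and the
pointer subtraction above truncates a ptrdiff_t to int, which could in principle
misorder cards if the card table ever spanned more than INT_MAX bytes. A
standalone sketch (hypothetical names, not part of the patch) of an
overflow-safe comparator producing the same decreasing order:

#include <cstdio>
#include <cstdlib>

typedef unsigned char CardValue;   // stand-in for CardTable::CardValue

// Decreasing address order: compare instead of subtracting, so the
// result cannot be truncated.
static int compare_card_safe(const void* p1, const void* p2) {
  const CardValue* c1 = *static_cast<CardValue* const*>(p1);
  const CardValue* c2 = *static_cast<CardValue* const*>(p2);
  if (c2 > c1) return 1;   // p2 is the later address => p1 sorts after it
  if (c2 < c1) return -1;
  return 0;
}

int main() {
  CardValue table[8] = {};
  CardValue* cards[4] = { &table[2], &table[7], &table[0], &table[5] };
  qsort(cards, 4, sizeof(CardValue*), compare_card_safe);
  for (int i = 0; i < 4; ++i) {
    printf("%td\n", cards[i] - table);   // prints 7 5 2 0
  }
  return 0;
}
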
*** 359,373 ****
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id
uint counter_index = worker_id - par_ids_start();
size_t* counter = &_mutator_refined_cards_counters[counter_index];
! ResourceMark rm;
! G1RefineBufferedCards buffered_cards(node,
! buffer_size(),
! counter);
! bool result = buffered_cards.refine(worker_id);
_free_ids.release_par_id(worker_id); // release the id
if (result) {
assert_fully_consumed(node, buffer_size());
}
--- 360,370 ----
bool G1DirtyCardQueueSet::mut_process_buffer(BufferNode* node) {
uint worker_id = _free_ids.claim_par_id(); // temporarily claim an id
uint counter_index = worker_id - par_ids_start();
size_t* counter = &_mutator_refined_cards_counters[counter_index];
! bool result = refine_buffer(node, worker_id, counter);
_free_ids.release_par_id(worker_id); // release the id
if (result) {
assert_fully_consumed(node, buffer_size());
}
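
The assert_fully_consumed(node, buffer_size()) check above leans on the index
contract visible in the first hunk: refine_cleaned_cards() sets the node's
index to the first unrefined card when it yields, and to _node_buffer_size once
everything is consumed. A toy model of that contract (hypothetical names,
illustration only, not part of the patch):

#include <cassert>
#include <cstddef>

struct ToyNode {
  size_t _index;
  size_t index() const { return _index; }
  void set_index(size_t i) { _index = i; }
};

// Pretend to refine cards; 'yield_at' stands in for the index at which
// SuspendibleThreadSet::should_yield() first returns true.
static bool toy_refine(ToyNode* node, size_t buffer_size, size_t yield_at) {
  for (size_t i = node->index(); i < buffer_size; ++i) {
    if (i == yield_at) {
      node->set_index(i);        // record the first unprocessed element
      return false;              // partially processed
    }
  }
  node->set_index(buffer_size);  // fully consumed
  return true;
}

int main() {
  ToyNode node = { 0 };
  assert(!toy_refine(&node, 16, 10) && node.index() == 10);
  // Re-processing resumes at index 10 and runs to completion.
  assert(toy_refine(&node, 16, 100) && node.index() == 16);
  return 0;
}
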
*** 378,402 ****
size_t stop_at,
size_t* total_refined_cards) {
BufferNode* node = get_completed_buffer(stop_at);
if (node == NULL) {
return false;
! } else {
! G1RefineBufferedCards buffered_cards(node,
! buffer_size(),
! total_refined_cards);
! if (buffered_cards.refine(worker_id)) {
assert_fully_consumed(node, buffer_size());
// Done with fully processed buffer.
deallocate_buffer(node);
return true;
} else {
// Return partially processed buffer to the queue.
enqueue_completed_buffer(node);
return true;
}
- }
}
void G1DirtyCardQueueSet::abandon_logs() {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
abandon_completed_buffers();
--- 375,394 ----
size_t stop_at,
size_t* total_refined_cards) {
BufferNode* node = get_completed_buffer(stop_at);
if (node == NULL) {
return false;
! } else if (refine_buffer(node, worker_id, total_refined_cards)) {
assert_fully_consumed(node, buffer_size());
// Done with fully processed buffer.
deallocate_buffer(node);
return true;
} else {
// Return partially processed buffer to the queue.
enqueue_completed_buffer(node);
return true;
}
}
void G1DirtyCardQueueSet::abandon_logs() {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
abandon_completed_buffers();
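
When refine_buffer() returns false in refine_completed_buffer_concurrently()
above, the buffer is re-enqueued rather than deallocated; by that point the
unrefined tail has already been redirtied by redirty_unrefined_cards() (first
hunk), so no dirty card is lost across the yield. A minimal sketch of that
redirtying step (the card encodings below are assumed stand-ins, not
G1CardTable's actual values):

#include <cassert>
#include <cstddef>

typedef unsigned char CardValue;   // stand-in for CardTable::CardValue

const CardValue dirty_card = 0;    // assumed encoding
const CardValue clean_card = 1;    // assumed encoding

// Same loop shape as redirty_unrefined_cards(): mark every card from
// 'start' to the end of the buffer dirty again.
static void redirty_unrefined(CardValue** buffer, size_t start, size_t size) {
  for ( ; start < size; ++start) {
    *buffer[start] = dirty_card;
  }
}

int main() {
  CardValue table[4] = { clean_card, clean_card, clean_card, clean_card };
  CardValue* buffer[4] = { &table[0], &table[1], &table[2], &table[3] };
  redirty_unrefined(buffer, 2, 4);   // a yield hit after refining two cards
  assert(table[0] == clean_card && table[1] == clean_card);
  assert(table[2] == dirty_card && table[3] == dirty_card);
  return 0;
}
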