src/share/vm/gc_implementation/g1/g1HotCardCache.cpp
rev 7695 : 8069273: Reduce Hot Card Cache Lock contention
Reviewed-by: tschatzl
@@ -34,14 +34,16 @@
void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
  if (default_use_cache()) {
    _use_cache = true;

-    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
+    _hot_cache_size = (intptr_t)1 << G1ConcRSLogCacheSize;
    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
+    for (intptr_t i = 0; i < _hot_cache_size; i++) {
+      _hot_cache[i] = NULL;
+    }
-    _n_hot = 0;
    _hot_cache_idx = 0;

    // For refining the cards in the hot cache in parallel
    _hot_cache_par_chunk_size = ClaimChunkSize;
    _hot_cache_par_claimed_idx = 0;
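
Note on this hunk: the cache is now zero-filled eagerly, and the lock-free
scheme below relies on _hot_cache_size being a power of two, so that an
ever-increasing claim index can be mapped to a slot with a mask instead of an
explicit wrap-around (one of the steps the old locked code handled
explicitly). A minimal standalone sketch of that masking invariant, in plain
C++ with illustrative names only (log_size stands in for G1ConcRSLogCacheSize):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t log_size = 10;                   // stands in for G1ConcRSLogCacheSize
  const intptr_t size = (intptr_t)1 << log_size;  // a power of two by construction
  assert((size & (size - 1)) == 0);

  // For power-of-two sizes, masking a monotonically growing index is
  // equivalent to taking it modulo the size, so no thread ever has to
  // reset the shared index to zero when the buffer wraps.
  for (intptr_t index = 0; index < 5 * size; index += 97) {
    assert((index & (size - 1)) == index % size);
  }
  return 0;
}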
@@ -62,30 +64,33 @@
  if (!_card_counts.is_hot(count)) {
    // The card is not hot so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }
-
  // Otherwise, the card is hot.
-  jbyte* res = NULL;
-  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
-  if (_n_hot == _hot_cache_size) {
-    res = _hot_cache[_hot_cache_idx];
-    _n_hot--;
-  }
-
-  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
-  _hot_cache[_hot_cache_idx] = card_ptr;
-  _hot_cache_idx++;
-
-  if (_hot_cache_idx == _hot_cache_size) {
-    // Wrap around
-    _hot_cache_idx = 0;
-  }
-  _n_hot++;
-
-  return res;
+  jbyte* current_ptr;
+  intptr_t masked_index;
+  intptr_t index;
+  do {
+    index = _hot_cache_idx;
+    masked_index = index & (_hot_cache_size - 1);
+    current_ptr = _hot_cache[masked_index];
+    if (current_ptr == card_ptr) {
+      return NULL;
+    }
+  } while (index != Atomic::cmpxchg_ptr(index + 1, &_hot_cache_idx, index));
+
+  // Try to store the new card pointer into the cache. Compare-and-swap to
+  // guard against the unlikely event of a race resulting in another card
+  // pointer having already been written to the cache. In that case we return
+  // card_ptr in favor of the other option, which would be starting over; this
+  // should be OK since card_ptr will likely be the older card already when/if
+  // this ever happens.
+  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
+                                                    &_hot_cache[masked_index],
+                                                    current_ptr);
+  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}
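
The new insert() replaces the HotCardCache_lock critical section with two
atomic steps: claim a slot by advancing the shared index in a cmpxchg loop,
then publish the card pointer into the claimed slot with a second cmpxchg,
returning whatever entry was evicted. A rough re-creation of the scheme using
C++11 std::atomic in place of HotSpot's Atomic::cmpxchg_ptr; all names here
are illustrative, not HotSpot's:

#include <atomic>
#include <cstdint>

static const intptr_t cache_size = (intptr_t)1 << 10;  // must be a power of two
static std::atomic<char*> cache[cache_size];           // zero-initialized statics
static std::atomic<intptr_t> claim_idx{0};

// Returns NULL if card_ptr is now (or was already) cached; otherwise returns
// a pointer that must be refined immediately: either the evicted entry, or
// card_ptr itself if a racing insert won the publish step.
char* insert(char* card_ptr) {
  intptr_t index;
  intptr_t masked_index;
  char* current_ptr;
  do {
    index = claim_idx.load();
    masked_index = index & (cache_size - 1);
    current_ptr = cache[masked_index].load();
    if (current_ptr == card_ptr) {
      return nullptr;  // the slot we would claim already holds this card
    }
    // Claim the slot by advancing the shared index; retry if another
    // thread advanced it first (the cmpxchg loop in the patch).
  } while (!claim_idx.compare_exchange_strong(index, index + 1));

  // Publish card_ptr. If a racing insert already replaced current_ptr,
  // give up and return card_ptr for immediate refining instead of looping.
  if (cache[masked_index].compare_exchange_strong(current_ptr, card_ptr)) {
    return current_ptr;  // evicted entry; NULL while the cache is filling up
  }
  return card_ptr;
}

The losing side of the second cmpxchg is deliberately not retried: as the
patch comment argues, the card that failed to be published is likely the
older of the two, so refining it immediately is the cheaper outcome.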
void G1HotCardCache::drain(uint worker_i,
                           G1RemSet* g1rs,
                           DirtyCardQueue* into_cset_dcq) {
@@ -94,20 +99,20 @@
    return;
  }

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");

-  int start_idx;
+  intptr_t start_idx;
-  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-    int end_idx = start_idx + _hot_cache_par_chunk_size;
+  while ((start_idx = _hot_cache_par_claimed_idx) < _hot_cache_size) { // read once
+    intptr_t end_idx = start_idx + _hot_cache_par_chunk_size;
    if (start_idx ==
-        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
+        Atomic::cmpxchg_ptr(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
-      end_idx = MIN2(end_idx, _n_hot);
-      for (int i = start_idx; i < end_idx; i++) {
+      end_idx = MIN2(end_idx, _hot_cache_size);
+      for (intptr_t i = start_idx; i < end_idx; i++) {
        jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          if (g1rs->refine_card(card_ptr, worker_i, true)) {
            // The part of the heap spanned by the card contains references
            // that point into the current collection set.
@@ -122,10 +127,12 @@
            assert(worker_i < ParallelGCThreads,
                   err_msg("incorrect worker id: %u", worker_i));
            into_cset_dcq->enqueue(card_ptr);
          }
+        } else {
+          break;
        }
      }
    }
  }

  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the collection.
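
drain() keeps the chunked claiming protocol but now walks the whole
NULL-padded array instead of the removed _n_hot counter, which is why a NULL
entry lets a worker stop scanning its chunk early. A simplified sketch of the
protocol, again with C++11 std::atomic standing in for the HotSpot atomics
and refine_card reduced to a callback (all names illustrative):

#include <algorithm>
#include <atomic>
#include <cstdint>

static const intptr_t chunk_size = 32;  // stands in for ClaimChunkSize
static std::atomic<intptr_t> par_claimed_idx{0};

void drain_worker(char* const* cache, intptr_t cache_size,
                  void (*refine)(char* card_ptr)) {
  intptr_t start_idx;
  while ((start_idx = par_claimed_idx.load()) < cache_size) {
    intptr_t end_idx = start_idx + chunk_size;
    // A single cmpxchg claims the chunk [start_idx, end_idx); a losing
    // worker re-reads the claim index and tries for the next chunk.
    if (par_claimed_idx.compare_exchange_strong(start_idx, end_idx)) {
      end_idx = std::min(end_idx, cache_size);
      for (intptr_t i = start_idx; i < end_idx; i++) {
        char* card_ptr = cache[i];
        if (card_ptr == nullptr) {
          // Slots are filled in claim order, so the first NULL means the
          // rest of this chunk was never written either.
          break;
        }
        refine(card_ptr);
      }
    }
  }
}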