
src/share/vm/gc_implementation/g1/g1HotCardCache.cpp

rev 7695 : 8069273: Reduce Hot Card Cache Lock contention
Reviewed-by: tschatzl
rev 7696 : [mq]: atomicadd

*** 34,47 ****
  void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
    if (default_use_cache()) {
      _use_cache = true;
  
!     _hot_cache_size = (1 << G1ConcRSLogCacheSize);
      _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
  
-     _n_hot = 0;
      _hot_cache_idx = 0;
  
      // For refining the cards in the hot cache in parallel
      _hot_cache_par_chunk_size = ClaimChunkSize;
      _hot_cache_par_claimed_idx = 0;
  
--- 34,49 ----
  void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
    if (default_use_cache()) {
      _use_cache = true;
  
!     _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
      _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
  
+     for (size_t i = 0; i < _hot_cache_size; i++) {
+       _hot_cache[i] = NULL;
+     }
      _hot_cache_idx = 0;
  
      // For refining the cards in the hot cache in parallel
      _hot_cache_par_chunk_size = ClaimChunkSize;
      _hot_cache_par_claimed_idx = 0;
  
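As an illustrative aside (not part of the patch): the hunk above changes two things the later hunks rely on. The cache size is now an explicit power of two stored as a size_t, and every slot is pre-filled with NULL. The power-of-two size lets the insertion path wrap an ever-growing claim index with a simple mask, and the NULL pre-fill lets drain() recognize slots that were never written. A minimal sketch of the masking equivalence, with hypothetical names standing in for the HotSpot ones:

#include <cassert>
#include <cstddef>

// Illustrative only: with a power-of-two capacity, masking by (capacity - 1)
// is the same as taking the index modulo the capacity, so a monotonically
// increasing counter can address a fixed ring of slots without a wrap branch.
int main() {
  const size_t log_cache_size = 10;                        // stand-in for G1ConcRSLogCacheSize
  const size_t cache_size = (size_t)1 << log_cache_size;

  for (size_t index = 0; index < 5 * cache_size; index += 37) {
    assert((index & (cache_size - 1)) == (index % cache_size));
  }
  return 0;
}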
*** 62,91 ****
    if (!_card_counts.is_hot(count)) {
      // The card is not hot so do not store it in the cache;
      // return it for immediate refining.
      return card_ptr;
    }
- 
    // Otherwise, the card is hot.
!   jbyte* res = NULL;
!   MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
!   if (_n_hot == _hot_cache_size) {
!     res = _hot_cache[_hot_cache_idx];
!     _n_hot--;
!   }
! 
!   // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
!   _hot_cache[_hot_cache_idx] = card_ptr;
!   _hot_cache_idx++;
! 
!   if (_hot_cache_idx == _hot_cache_size) {
!     // Wrap around
!     _hot_cache_idx = 0;
!   }
!   _n_hot++;
! 
!   return res;
  }
  
  void G1HotCardCache::drain(uint worker_i,
                             G1RemSet* g1rs,
                             DirtyCardQueue* into_cset_dcq) {
--- 64,88 ----
    if (!_card_counts.is_hot(count)) {
      // The card is not hot so do not store it in the cache;
      // return it for immediate refining.
      return card_ptr;
    }
    // Otherwise, the card is hot.
!   size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
!   size_t masked_index = index & (_hot_cache_size - 1);
!   jbyte* current_ptr = _hot_cache[masked_index];
! 
!   // Try to store the new card pointer into the cache. Compare-and-swap to guard
!   // against the unlikely event of a race resulting in another card pointer to
!   // have already been written to the cache. In this case we will return
!   // card_ptr in favor of the other option, which would be starting over. This
!   // should be OK since card_ptr will likely be the older card already when/if
!   // this ever happens.
!   jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
!                                                     &_hot_cache[masked_index],
!                                                     current_ptr);
!   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
  }
  
  void G1HotCardCache::drain(uint worker_i,
                             G1RemSet* g1rs,
                             DirtyCardQueue* into_cset_dcq) {
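As an illustrative aside (not part of the patch): the new insert path replaces the HotCardCache_lock with a claim-then-publish pattern. A thread claims a slot by atomically bumping the global index, masks the index into the power-of-two ring, and publishes its card with a single compare-and-swap; if another thread raced into the same slot, the caller keeps its own card for immediate refining instead of retrying. The sketch below reproduces that pattern with standard C++11 atomics rather than HotSpot's Atomic:: wrappers; all names (hot_cache_insert, kCacheSize, g_slots, g_claim_idx) are hypothetical.

#include <atomic>
#include <cstddef>

typedef signed char jbyte;                        // stand-in for HotSpot's jbyte

static const size_t kCacheSize = 1024;            // must be a power of two
static std::atomic<jbyte*> g_slots[kCacheSize];   // statically zero-initialized, i.e. all NULL
static std::atomic<size_t> g_claim_idx(0);

// Returns the evicted card that now needs refining, NULL if the claimed slot
// was still empty, or card_ptr itself if a racing thread already published
// into the same slot (in which case the caller refines its own card).
jbyte* hot_cache_insert(jbyte* card_ptr) {
  size_t index = g_claim_idx.fetch_add(1, std::memory_order_relaxed);
  size_t masked_index = index & (kCacheSize - 1);

  jbyte* current = g_slots[masked_index].load(std::memory_order_relaxed);
  // Single compare-and-swap: publish card_ptr only if the slot still holds the
  // value just read. On a lost race there is no retry loop; the caller keeps
  // card_ptr, mirroring the reasoning in the patch's comment block.
  if (g_slots[masked_index].compare_exchange_strong(current, card_ptr)) {
    return current;
  }
  return card_ptr;
}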
*** 94,113 ****
      return;
    }
  
    assert(_hot_cache != NULL, "Logic");
    assert(!use_cache(), "cache should be disabled");
-   int start_idx;
- 
-   while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
-     int end_idx = start_idx + _hot_cache_par_chunk_size;
! 
!     if (start_idx ==
!         Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
        // The current worker has successfully claimed the chunk [start_idx..end_idx)
!       end_idx = MIN2(end_idx, _n_hot);
!       for (int i = start_idx; i < end_idx; i++) {
          jbyte* card_ptr = _hot_cache[i];
          if (card_ptr != NULL) {
            if (g1rs->refine_card(card_ptr, worker_i, true)) {
              // The part of the heap spanned by the card contains references
              // that point into the current collection set.
--- 91,108 ----
      return;
    }
  
    assert(_hot_cache != NULL, "Logic");
    assert(!use_cache(), "cache should be disabled");
! 
!   while (_hot_cache_par_claimed_idx < _hot_cache_size) {
!     size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
!                                  &_hot_cache_par_claimed_idx);
!     size_t start_idx = end_idx - _hot_cache_par_chunk_size;
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
!     end_idx = MIN2(end_idx, _hot_cache_size);
!     for (size_t i = start_idx; i < end_idx; i++) {
        jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          if (g1rs->refine_card(card_ptr, worker_i, true)) {
            // The part of the heap spanned by the card contains references
            // that point into the current collection set.
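Another illustrative aside: the reworked drain loop claims chunks with an unconditional atomic add instead of a cmpxchg retry, so a claim can never fail; at worst a worker claims a range past the end of the array and clamps it away. A hedged sketch of that chunk-claiming scheme with standard atomics (drain_worker, kChunkSize and g_claimed_idx are made-up names, not HotSpot's):

#include <atomic>
#include <cstddef>

static const size_t kCacheSize = 1024;   // power-of-two cache capacity
static const size_t kChunkSize = 32;     // stand-in for ClaimChunkSize
static std::atomic<size_t> g_claimed_idx(0);

// Each GC worker runs this loop concurrently; process(i) stands in for
// refining the card pointer stored at _hot_cache[i].
template <typename Fn>
void drain_worker(Fn process) {
  while (g_claimed_idx.load(std::memory_order_relaxed) < kCacheSize) {
    // Claim the next chunk unconditionally. Unlike the old cmpxchg-based
    // claim, this cannot fail; at worst a worker claims a chunk past the end
    // of the array and the clamped loop below does nothing.
    size_t start_idx = g_claimed_idx.fetch_add(kChunkSize);  // previous value
    size_t end_idx = start_idx + kChunkSize;                 // Atomic::add would return this

    if (end_idx > kCacheSize) {
      end_idx = kCacheSize;                                  // MIN2 equivalent
    }
    for (size_t i = start_idx; i < end_idx; i++) {
      process(i);
    }
  }
}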
*** 122,135 ****
              assert(worker_i < ParallelGCThreads,
                     err_msg("incorrect worker id: %u", worker_i));
              into_cset_dcq->enqueue(card_ptr);
            }
          }
        }
      }
!   }
  
    // The existing entries in the hot card cache, which were just refined
    // above, are discarded prior to re-enabling the cache near the end of the GC.
  }
  
  void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
--- 117,132 ----
            assert(worker_i < ParallelGCThreads,
                   err_msg("incorrect worker id: %u", worker_i));
            into_cset_dcq->enqueue(card_ptr);
          }
+       } else {
+         break;
        }
      }
    }
! 
    // The existing entries in the hot card cache, which were just refined
    // above, are discarded prior to re-enabling the cache near the end of the GC.
  }
  
  void G1HotCardCache::reset_card_counts(HeapRegion* hr) {