src/hotspot/share/gc/g1/g1HotCardCache.cpp

*** 66,100 ****
      // The card is not hot so do not store it in the cache;
      // return it for immediate refining.
      return card_ptr;
    }
    // Otherwise, the card is hot.
!   size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
    size_t masked_index = index & (_hot_cache_size - 1);
    CardValue* current_ptr = _hot_cache[masked_index];

    // Try to store the new card pointer into the cache. Compare-and-swap to guard
    // against the unlikely event of a race resulting in another card pointer to
    // have already been written to the cache. In this case we will return
    // card_ptr in favor of the other option, which would be starting over. This
    // should be OK since card_ptr will likely be the older card already when/if
    // this ever happens.
!   CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
!                                             &_hot_cache[masked_index],
!                                             current_ptr);
    return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
  }

  void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
    assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");

    assert(_hot_cache != NULL, "Logic");
    assert(!use_cache(), "cache should be disabled");

    while (_hot_cache_par_claimed_idx < _hot_cache_size) {
!     size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
!                                  &_hot_cache_par_claimed_idx);
      size_t start_idx = end_idx - _hot_cache_par_chunk_size;
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _hot_cache_size);
      for (size_t i = start_idx; i < end_idx; i++) {
        CardValue* card_ptr = _hot_cache[i];
--- 66,100 ----
      // The card is not hot so do not store it in the cache;
      // return it for immediate refining.
      return card_ptr;
    }
    // Otherwise, the card is hot.
!   size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
    size_t masked_index = index & (_hot_cache_size - 1);
    CardValue* current_ptr = _hot_cache[masked_index];

    // Try to store the new card pointer into the cache. Compare-and-swap to guard
    // against the unlikely event of a race resulting in another card pointer to
    // have already been written to the cache. In this case we will return
    // card_ptr in favor of the other option, which would be starting over. This
    // should be OK since card_ptr will likely be the older card already when/if
    // this ever happens.
!   CardValue* previous_ptr = Atomic::cmpxchg(&_hot_cache[masked_index],
!                                             current_ptr,
!                                             card_ptr);
    return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
  }

  void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
    assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");

    assert(_hot_cache != NULL, "Logic");
    assert(!use_cache(), "cache should be disabled");

    while (_hot_cache_par_claimed_idx < _hot_cache_size) {
!     size_t end_idx = Atomic::add(&_hot_cache_par_claimed_idx,
!                                  _hot_cache_par_chunk_size);
      size_t start_idx = end_idx - _hot_cache_par_chunk_size;
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _hot_cache_size);
      for (size_t i = start_idx; i < end_idx; i++) {
        CardValue* card_ptr = _hot_cache[i];
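
Both hunks in insert() reorder arguments so that the updated memory location comes
first: Atomic::add(&dest, add_value) and Atomic::cmpxchg(&dest, compare_value,
exchange_value). The insert path itself is a lock-free ring-buffer store: claim a
slot with an atomic increment, then compare-and-swap the new card pointer into it,
handing the card back for immediate refining if the CAS loses a race. Below is a
minimal standalone sketch of that pattern using standard C++ atomics; it is not
HotSpot code, and all names (kCacheSize, cache, cache_idx, insert) are illustrative.

    #include <atomic>
    #include <cstddef>

    constexpr size_t kCacheSize = 1024;   // must be a power of two for masking
    std::atomic<void*> cache[kCacheSize]; // zero-initialized at namespace scope
    std::atomic<size_t> cache_idx{0};

    // Returns the evicted entry (to refine now, possibly null) on success,
    // or card_ptr itself if another thread won the race for the slot.
    void* insert(void* card_ptr) {
      // fetch_add returns the old value, matching Atomic::add(&idx, 1u) - 1.
      size_t index = cache_idx.fetch_add(1);
      size_t masked_index = index & (kCacheSize - 1);
      void* current_ptr = cache[masked_index].load();

      // Single CAS, no retry: on failure we keep the value another thread
      // stored and return card_ptr for immediate refining instead of
      // starting over, as the original comment explains.
      if (cache[masked_index].compare_exchange_strong(current_ptr, card_ptr)) {
        return current_ptr;
      }
      return card_ptr;
    }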
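
The drain() hunk uses the same reordered Atomic::add so that parallel workers can
claim chunks of the cache: each worker bumps a shared claim index by a fixed chunk
size, owns the half-open range [start_idx, end_idx) it claimed, and clamps the last
chunk to the cache size. A standalone sketch of that claiming scheme follows; again
this is standard C++, not HotSpot code, and the names (kChunkSize, claimed_idx,
drain_worker, process) are hypothetical.

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    constexpr size_t kCacheSize = 1024;
    constexpr size_t kChunkSize = 64;
    std::atomic<size_t> claimed_idx{0};

    void drain_worker(void* const* cache, void (*process)(void*)) {
      while (claimed_idx.load() < kCacheSize) {
        // fetch_add returns the old index; the claimed chunk ends at old + chunk
        // (HotSpot's Atomic::add returns the new value directly).
        size_t end_idx = claimed_idx.fetch_add(kChunkSize) + kChunkSize;
        size_t start_idx = end_idx - kChunkSize;
        end_idx = std::min(end_idx, kCacheSize); // clamp the final partial chunk
        for (size_t i = start_idx; i < end_idx; i++) {
          if (cache[i] != nullptr) { // skip slots never filled
            process(cache[i]);
          }
        }
      }
    }

Because every worker advances claimed_idx atomically, no two workers ever receive
overlapping ranges, and a worker that claims a range past the end simply falls out
of its for loop and re-checks the while condition.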