
src/share/vm/gc/g1/g1HotCardCache.cpp

rev 10742 : Make fields used in lock-free algorithms volatile

*** 34,44 ****
  void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
    if (default_use_cache()) {
      _use_cache = true;

      _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
!     _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);

      reset_hot_cache_internal();

      // For refining the cards in the hot cache in parallel
      _hot_cache_par_chunk_size = ClaimChunkSize;
--- 34,44 ----
  void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
    if (default_use_cache()) {
      _use_cache = true;

      _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
!     _hot_cache = ArrayAllocator<volatile jbyte*, mtGC>::allocate(_hot_cache_size);

      reset_hot_cache_internal();

      // For refining the cards in the hot cache in parallel
      _hot_cache_par_chunk_size = ClaimChunkSize;
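The cache is sized as a power of two ((size_t)1 << G1ConcRSLogCacheSize) so that insert() below can wrap its ever-growing claim index with a bit-mask instead of a division. A minimal standalone sketch of that equivalence follows; the names are illustrative, not HotSpot's:

#include <cassert>
#include <cstddef>

int main() {
  // Hypothetical stand-in for _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize.
  const size_t cache_size = (size_t)1 << 10; // 1024 slots

  // For a power-of-two size, size - 1 is a mask of the low bits, so
  // index % size and index & (size - 1) select the same slot.
  for (size_t index = 0; index < 5 * cache_size; index++) {
    assert((index % cache_size) == (index & (cache_size - 1)));
  }
  return 0;
}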
*** 49,84 ****
  }

  G1HotCardCache::~G1HotCardCache() {
    if (default_use_cache()) {
      assert(_hot_cache != NULL, "Logic");
!     ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
      _hot_cache = NULL;
    }
  }

! jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
    uint count = _card_counts.add_card_count(card_ptr);
    if (!_card_counts.is_hot(count)) {
      // The card is not hot so do not store it in the cache;
      // return it for immediate refining.
      return card_ptr;
    }

    // Otherwise, the card is hot.
    size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
    size_t masked_index = index & (_hot_cache_size - 1);
!   jbyte* current_ptr = _hot_cache[masked_index];

    // Try to store the new card pointer into the cache. Compare-and-swap to guard
    // against the unlikely event of a race resulting in another card pointer to
    // have already been written to the cache. In this case we will return
    // card_ptr in favor of the other option, which would be starting over. This
    // should be OK since card_ptr will likely be the older card already when/if
    // this ever happens.
!   jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr, &_hot_cache[masked_index],
!                                                     current_ptr);
    return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
  }

  void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
    assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
--- 49,84 ----
  }

  G1HotCardCache::~G1HotCardCache() {
    if (default_use_cache()) {
      assert(_hot_cache != NULL, "Logic");
!     ArrayAllocator<volatile jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
      _hot_cache = NULL;
    }
  }

! volatile jbyte* G1HotCardCache::insert(volatile jbyte* card_ptr) {
    uint count = _card_counts.add_card_count(card_ptr);
    if (!_card_counts.is_hot(count)) {
      // The card is not hot so do not store it in the cache;
      // return it for immediate refining.
      return card_ptr;
    }

    // Otherwise, the card is hot.
    size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
    size_t masked_index = index & (_hot_cache_size - 1);
!   volatile jbyte* current_ptr = _hot_cache[masked_index];

    // Try to store the new card pointer into the cache. Compare-and-swap to guard
    // against the unlikely event of a race resulting in another card pointer to
    // have already been written to the cache. In this case we will return
    // card_ptr in favor of the other option, which would be starting over. This
    // should be OK since card_ptr will likely be the older card already when/if
    // this ever happens.
!   volatile jbyte* previous_ptr = (volatile jbyte*)Atomic::cmpxchg_ptr((jbyte*)card_ptr, &_hot_cache[masked_index],
!                                                                       (jbyte*)current_ptr);
    return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
  }

  void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
    assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
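The insert path is the lock-free piece this revision is about: a fetch-and-add claims a unique slot in the ring buffer, and a compare-and-swap publishes the new pointer so a racing writer is detected rather than silently overwritten. Below is a self-contained sketch of the same claim-then-CAS pattern using std::atomic in place of HotSpot's Atomic class; all names are illustrative, and the memory-ordering details of the real Atomic API are not modeled:

#include <atomic>
#include <cstddef>

typedef signed char jbyte; // stand-in for HotSpot's jbyte

static const size_t cache_size = 1024;            // must be a power of two
static std::atomic<jbyte*> hot_cache[cache_size]; // zero-initialized ring buffer
static std::atomic<size_t> hot_cache_idx(0);      // ever-growing claim index

// Insert card_ptr and return the pointer the caller must refine now:
// the evicted occupant of the claimed slot on success, or card_ptr
// itself if a racing writer got to the slot first.
jbyte* insert(jbyte* card_ptr) {
  size_t index = hot_cache_idx.fetch_add(1);      // claim a unique slot
  size_t masked_index = index & (cache_size - 1); // cheap wrap-around
  jbyte* current_ptr = hot_cache[masked_index].load();

  // Publish card_ptr only if the slot still holds what we just read.
  // On failure another thread won the race; hand card_ptr back for
  // immediate refinement instead of starting over.
  if (hot_cache[masked_index].compare_exchange_strong(current_ptr, card_ptr)) {
    return current_ptr; // evicted entry (NULL while the cache is still filling)
  }
  return card_ptr;
}

Returning the loser's pointer instead of retrying keeps the fast path bounded: whichever card fails to land in the cache is simply refined immediately, so no card is ever dropped.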
*** 91,101 ****
                                              &_hot_cache_par_claimed_idx);
      size_t start_idx = end_idx - _hot_cache_par_chunk_size;
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _hot_cache_size);
      for (size_t i = start_idx; i < end_idx; i++) {
!       jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          bool result = cl->do_card_ptr(card_ptr, worker_i);
          assert(result, "Closure should always return true");
        } else {
          break;
--- 91,101 ----
                                              &_hot_cache_par_claimed_idx);
      size_t start_idx = end_idx - _hot_cache_par_chunk_size;
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _hot_cache_size);
      for (size_t i = start_idx; i < end_idx; i++) {
!       volatile jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          bool result = cl->do_card_ptr(card_ptr, worker_i);
          assert(result, "Closure should always return true");
        } else {
          break;
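drain() parallelizes the flush with the same fetch-and-add idea at chunk granularity: each worker atomically bumps a shared claim index to own a disjoint range [start_idx, end_idx), and stops at the first NULL slot because slots are populated from the front. A hedged sketch of that claiming loop, again with std::atomic and illustrative names standing in for the fields and closure above:

#include <algorithm>
#include <atomic>
#include <cstddef>

typedef signed char jbyte; // stand-in for HotSpot's jbyte

static const size_t chunk_size = 32;           // stand-in for ClaimChunkSize
static std::atomic<size_t> par_claimed_idx(0); // shared chunk-claim index

// Run by each worker thread; do_card_ptr stands in for the closure call.
void drain_worker(jbyte* const* cache, size_t cache_size,
                  void (*do_card_ptr)(jbyte*)) {
  while (par_claimed_idx.load() < cache_size) {
    // Atomically advance the claim index; the chunk this worker owns
    // ends at the post-add value and starts chunk_size before it.
    size_t end_idx = par_claimed_idx.fetch_add(chunk_size) + chunk_size;
    size_t start_idx = end_idx - chunk_size;
    end_idx = std::min(end_idx, cache_size); // clamp the final chunk

    for (size_t i = start_idx; i < end_idx; i++) {
      jbyte* card_ptr = cache[i];
      if (card_ptr == NULL) {
        break; // slots are filled densely from the front
      }
      do_card_ptr(card_ptr);
    }
  }
}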