
src/hotspot/share/gc/g1/g1HotCardCache.cpp

The old and new versions of the file follow; the only change is the Atomic::add operand order, which moves from (add_value, dest) to (dest, add_value).

Old:

  51     _card_counts.initialize(card_counts_storage);
  52   }
  53 }
  54 
  55 G1HotCardCache::~G1HotCardCache() {
  56   if (default_use_cache()) {
  57     assert(_hot_cache != NULL, "Logic");
  58     ArrayAllocator<CardValue*>::free(_hot_cache, _hot_cache_size);
  59     _hot_cache = NULL;
  60   }
  61 }
  62 
  63 CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
  64   uint count = _card_counts.add_card_count(card_ptr);
  65   if (!_card_counts.is_hot(count)) {
  66     // The card is not hot so do not store it in the cache;
  67     // return it for immediate refining.
  68     return card_ptr;
  69   }
  70   // Otherwise, the card is hot.
  71   size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
  72   size_t masked_index = index & (_hot_cache_size - 1);
  73   CardValue* current_ptr = _hot_cache[masked_index];
  74 
  75   // Try to store the new card pointer into the cache. Compare-and-swap to
  76   // guard against the unlikely event of a race in which another card pointer
  77   // has already been written to the cache. In that case we return card_ptr
  78   // in favor of the alternative, which would be starting over. This should
  79   // be OK since card_ptr will likely be the older card already when/if this
  80   // ever happens.
  81   CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
  82                                             &_hot_cache[masked_index],
  83                                             current_ptr);
  84   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
  85 }
  86 
  87 void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
  88   assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
  89 
  90   assert(_hot_cache != NULL, "Logic");
  91   assert(!use_cache(), "cache should be disabled");
  92 
  93   while (_hot_cache_par_claimed_idx < _hot_cache_size) {
  94     size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
  95                                  &_hot_cache_par_claimed_idx);
  96     size_t start_idx = end_idx - _hot_cache_par_chunk_size;
  97     // The current worker has successfully claimed the chunk [start_idx..end_idx)
  98     end_idx = MIN2(end_idx, _hot_cache_size);
  99     for (size_t i = start_idx; i < end_idx; i++) {
 100       CardValue* card_ptr = _hot_cache[i];
 101       if (card_ptr != NULL) {
 102         cl->do_card_ptr(card_ptr, worker_id);
 103       } else {
 104         break;
 105       }
 106     }
 107   }
 108 
 109   // The existing entries in the hot card cache, which were just refined
 110   // above, are discarded prior to re-enabling the cache near the end of the GC.
 111 }
 112 
 113 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
 114   _card_counts.clear_region(hr);
  115 }

New:

  51     _card_counts.initialize(card_counts_storage);
  52   }
  53 }
  54 
  55 G1HotCardCache::~G1HotCardCache() {
  56   if (default_use_cache()) {
  57     assert(_hot_cache != NULL, "Logic");
  58     ArrayAllocator<CardValue*>::free(_hot_cache, _hot_cache_size);
  59     _hot_cache = NULL;
  60   }
  61 }
  62 
  63 CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
  64   uint count = _card_counts.add_card_count(card_ptr);
  65   if (!_card_counts.is_hot(count)) {
  66     // The card is not hot so do not store it in the cache;
  67     // return it for immediate refining.
  68     return card_ptr;
  69   }
  70   // Otherwise, the card is hot.
  71   size_t index = Atomic::add(&_hot_cache_idx, 1u) - 1;
  72   size_t masked_index = index & (_hot_cache_size - 1);
  73   CardValue* current_ptr = _hot_cache[masked_index];
  74 
  75   // Try to store the new card pointer into the cache. Compare-and-swap to
  76   // guard against the unlikely event of a race in which another card pointer
  77   // has already been written to the cache. In that case we return card_ptr
  78   // in favor of the alternative, which would be starting over. This should
  79   // be OK since card_ptr will likely be the older card already when/if this
  80   // ever happens.
  81   CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
  82                                             &_hot_cache[masked_index],
  83                                             current_ptr);
  84   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
  85 }
  86 
  87 void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
  88   assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
  89 
  90   assert(_hot_cache != NULL, "Logic");
  91   assert(!use_cache(), "cache should be disabled");
  92 
  93   while (_hot_cache_par_claimed_idx < _hot_cache_size) {
  94     size_t end_idx = Atomic::add(&_hot_cache_par_claimed_idx,
  95                                  _hot_cache_par_chunk_size);
  96     size_t start_idx = end_idx - _hot_cache_par_chunk_size;
  97     // The current worker has successfully claimed the chunk [start_idx..end_idx)
  98     end_idx = MIN2(end_idx, _hot_cache_size);
  99     for (size_t i = start_idx; i < end_idx; i++) {
 100       CardValue* card_ptr = _hot_cache[i];
 101       if (card_ptr != NULL) {
 102         cl->do_card_ptr(card_ptr, worker_id);
 103       } else {
 104         break;
 105       }
 106     }
 107   }
 108 
 109   // The existing entries in the hot card cache, which were just refined
 110   // above, are discarded prior to re-enabling the cache near the end of the GC.
 111 }
 112 
 113 void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
 114   _card_counts.clear_region(hr);
 115 }