src/share/vm/gc_implementation/g1/g1HotCardCache.cpp

#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "runtime/atomic.hpp"

G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

void G1HotCardCache::initialize() {
  if (default_use_cache()) {
    _use_cache = true;

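    // The cache is a fixed-size array of 2^G1ConcRSLogCacheSize card
    // pointers, used as a circular buffer (see insert() below).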
    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);

    _n_hot = 0;
    _hot_cache_idx = 0;

    // For refining the cards in the hot cache in parallel
    int n_workers = (ParallelGCThreads > 0 ?
                        _g1h->workers()->total_workers() : 1);
    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
    _hot_cache_par_claimed_idx = 0;

    _card_counts.initialize();
  }
}

G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
  }
}

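// Records the given card in the cache once its count crosses the "hot"
// threshold. Returns a card that the caller should refine immediately:
// the card itself if it is not (yet) hot, the card evicted to make room
// if the cache is full, or NULL if the card was cached without eviction.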
jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
  uint count = _card_counts.add_card_count(card_ptr);
  if (!_card_counts.is_hot(count)) {
    // The card is not hot so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }

  // Otherwise, the card is hot.
  jbyte* res = NULL;
  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
  if (_n_hot == _hot_cache_size) {
    res = _hot_cache[_hot_cache_idx];
    _n_hot--;
  }

  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
  _hot_cache[_hot_cache_idx] = card_ptr;
  _hot_cache_idx++;

  if (_hot_cache_idx == _hot_cache_size) {
    // Wrap around
    _hot_cache_idx = 0;
  }
  _n_hot++;

  return res;
}

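// Drains the cache by refining every cached card. Multiple worker threads
// may call this concurrently; each claims chunks of the cache (see below).
// Cards found to contain references into the current collection set are
// enqueued on into_cset_dcq.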
void G1HotCardCache::drain(int worker_i,
                           G1RemSet* g1rs,
                           DirtyCardQueue* into_cset_dcq) {
  if (!default_use_cache()) {
    assert(_hot_cache == NULL, "Logic");
    return;
  }

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");
  int start_idx;

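  // Workers claim disjoint chunks of [0, _n_hot) by advancing
  // _hot_cache_par_claimed_idx with a CAS; only the thread whose CAS
  // succeeds refines the cards in [start_idx, end_idx).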
  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
    int end_idx = start_idx + _hot_cache_par_chunk_size;

    if (start_idx ==
        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _n_hot);
      for (int i = start_idx; i < end_idx; i++) {
        jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          if (g1rs->refine_card(card_ptr, worker_i, true)) {
            // The part of the heap spanned by the card contains references
            // that point into the current collection set.
            // We need to record the card pointer in the DirtyCardQueueSet
            // that we use for such cards.
            //
            // The only time we care about recording cards that contain
            // references that point into the collection set is during
            // RSet updating while within an evacuation pause.
            // In this case worker_i should be the id of a GC worker thread
            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
            assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
                   err_msg("incorrect worker id: "INT32_FORMAT, worker_i));

            into_cset_dcq->enqueue(card_ptr);
          }
        }
      }
    }
  }
  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}

void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
  _card_counts.resize(heap_capacity);
}

void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}

void G1HotCardCache::reset_card_counts() {

The updated version of the same file follows; worker ids and the worker-thread count change from int to uint, and the worker-id assert in drain() uses UINT32_FORMAT accordingly:

#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "runtime/atomic.hpp"

G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

void G1HotCardCache::initialize() {
  if (default_use_cache()) {
    _use_cache = true;

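    // The cache is a fixed-size array of 2^G1ConcRSLogCacheSize card
    // pointers, used as a circular buffer (see insert() below).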
    _hot_cache_size = (1 << G1ConcRSLogCacheSize);
    _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);

    _n_hot = 0;
    _hot_cache_idx = 0;

    // For refining the cards in the hot cache in parallel
    uint n_workers = (ParallelGCThreads > 0 ?
                        _g1h->workers()->total_workers() : 1);
    _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / (int)n_workers);
    _hot_cache_par_claimed_idx = 0;

    _card_counts.initialize();
  }
}

G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
  }
}

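// Records the given card in the cache once its count crosses the "hot"
// threshold. Returns a card that the caller should refine immediately:
// the card itself if it is not (yet) hot, the card evicted to make room
// if the cache is full, or NULL if the card was cached without eviction.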
jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
  uint count = _card_counts.add_card_count(card_ptr);
  if (!_card_counts.is_hot(count)) {
    // The card is not hot so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }

  // Otherwise, the card is hot.
  jbyte* res = NULL;
  MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
  if (_n_hot == _hot_cache_size) {
    res = _hot_cache[_hot_cache_idx];
    _n_hot--;
  }

  // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
  _hot_cache[_hot_cache_idx] = card_ptr;
  _hot_cache_idx++;

  if (_hot_cache_idx == _hot_cache_size) {
    // Wrap around
    _hot_cache_idx = 0;
  }
  _n_hot++;

  return res;
}

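// Drains the cache by refining every cached card. Multiple worker threads
// may call this concurrently; each claims chunks of the cache (see below).
// Cards found to contain references into the current collection set are
// enqueued on into_cset_dcq.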
void G1HotCardCache::drain(uint worker_i,
                           G1RemSet* g1rs,
                           DirtyCardQueue* into_cset_dcq) {
  if (!default_use_cache()) {
    assert(_hot_cache == NULL, "Logic");
    return;
  }

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");
  int start_idx;

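  // Workers claim disjoint chunks of [0, _n_hot) by advancing
  // _hot_cache_par_claimed_idx with a CAS; only the thread whose CAS
  // succeeds refines the cards in [start_idx, end_idx).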
  while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
    int end_idx = start_idx + _hot_cache_par_chunk_size;

    if (start_idx ==
        Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
      // The current worker has successfully claimed the chunk [start_idx..end_idx)
      end_idx = MIN2(end_idx, _n_hot);
      for (int i = start_idx; i < end_idx; i++) {
        jbyte* card_ptr = _hot_cache[i];
        if (card_ptr != NULL) {
          if (g1rs->refine_card(card_ptr, worker_i, true)) {
            // The part of the heap spanned by the card contains references
            // that point into the current collection set.
            // We need to record the card pointer in the DirtyCardQueueSet
            // that we use for such cards.
            //
            // The only time we care about recording cards that contain
            // references that point into the collection set is during
            // RSet updating while within an evacuation pause.
            // In this case worker_i should be the id of a GC worker thread
            assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
            assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
                   err_msg("incorrect worker id: "UINT32_FORMAT, worker_i));

            into_cset_dcq->enqueue(card_ptr);
          }
        }
      }
    }
  }
  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}

void G1HotCardCache::resize_card_counts(size_t heap_capacity) {
  _card_counts.resize(heap_capacity);
}

void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}

void G1HotCardCache::reset_card_counts() {