26 #include "gc_implementation/g1/dirtyCardQueue.hpp"
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
28 #include "gc_implementation/g1/g1HotCardCache.hpp"
29 #include "gc_implementation/g1/g1RemSet.hpp"
30 #include "runtime/atomic.inline.hpp"
31
32 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
33 _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
34
35 void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
36 if (default_use_cache()) {
37 _use_cache = true;
38
39 _hot_cache_size = (1 << G1ConcRSLogCacheSize);
40 _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
41
42 _n_hot = 0;
43 _hot_cache_idx = 0;
44
45 // For refining the cards in the hot cache in parallel
46 _hot_cache_par_chunk_size = (ParallelGCThreads > 0 ? ClaimChunkSize : _hot_cache_size);
47 _hot_cache_par_claimed_idx = 0;
48
49 _card_counts.initialize(card_counts_storage);
50 }
51 }
52
53 G1HotCardCache::~G1HotCardCache() {
54 if (default_use_cache()) {
55 assert(_hot_cache != NULL, "Logic");
56 FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
57 }
58 }
59
60 jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
61 uint count = _card_counts.add_card_count(card_ptr);
62 if (!_card_counts.is_hot(count)) {
63 // The card is not hot so do not store it in the cache;
64 // return it for immediate refining.
65 return card_ptr;
66 }
102 int end_idx = start_idx + _hot_cache_par_chunk_size;
103
104 if (start_idx ==
105 Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
106 // The current worker has successfully claimed the chunk [start_idx..end_idx)
107 end_idx = MIN2(end_idx, _n_hot);
108 for (int i = start_idx; i < end_idx; i++) {
109 jbyte* card_ptr = _hot_cache[i];
110 if (card_ptr != NULL) {
111 if (g1rs->refine_card(card_ptr, worker_i, true)) {
112 // The part of the heap spanned by the card contains references
113 // that point into the current collection set.
114 // We need to record the card pointer in the DirtyCardQueueSet
115 // that we use for such cards.
116 //
117 // The only time we care about recording cards that contain
118 // references that point into the collection set is during
119 // RSet updating while within an evacuation pause.
120 // In this case worker_i should be the id of a GC worker thread
121 assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
122 assert(worker_i < (ParallelGCThreads == 0 ? 1 : ParallelGCThreads),
123 err_msg("incorrect worker id: %u", worker_i));
124
125 into_cset_dcq->enqueue(card_ptr);
126 }
127 }
128 }
129 }
130 }
131 // The existing entries in the hot card cache, which were just refined
132 // above, are discarded prior to re-enabling the cache near the end of the GC.
133 }
134
// Reset the card hotness counts for the cards spanned by a single heap
// region, delegating to the counts table.
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}
138
// Reset the card hotness counts for all cards (whole-table clear).
void G1HotCardCache::reset_card_counts() {
  _card_counts.clear_all();
}
|
26 #include "gc_implementation/g1/dirtyCardQueue.hpp"
27 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
28 #include "gc_implementation/g1/g1HotCardCache.hpp"
29 #include "gc_implementation/g1/g1RemSet.hpp"
30 #include "runtime/atomic.inline.hpp"
31
32 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
33 _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}
34
35 void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
36 if (default_use_cache()) {
37 _use_cache = true;
38
39 _hot_cache_size = (1 << G1ConcRSLogCacheSize);
40 _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size, mtGC);
41
42 _n_hot = 0;
43 _hot_cache_idx = 0;
44
45 // For refining the cards in the hot cache in parallel
46 _hot_cache_par_chunk_size = ClaimChunkSize;
47 _hot_cache_par_claimed_idx = 0;
48
49 _card_counts.initialize(card_counts_storage);
50 }
51 }
52
53 G1HotCardCache::~G1HotCardCache() {
54 if (default_use_cache()) {
55 assert(_hot_cache != NULL, "Logic");
56 FREE_C_HEAP_ARRAY(jbyte*, _hot_cache, mtGC);
57 }
58 }
59
60 jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
61 uint count = _card_counts.add_card_count(card_ptr);
62 if (!_card_counts.is_hot(count)) {
63 // The card is not hot so do not store it in the cache;
64 // return it for immediate refining.
65 return card_ptr;
66 }
102 int end_idx = start_idx + _hot_cache_par_chunk_size;
103
104 if (start_idx ==
105 Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
106 // The current worker has successfully claimed the chunk [start_idx..end_idx)
107 end_idx = MIN2(end_idx, _n_hot);
108 for (int i = start_idx; i < end_idx; i++) {
109 jbyte* card_ptr = _hot_cache[i];
110 if (card_ptr != NULL) {
111 if (g1rs->refine_card(card_ptr, worker_i, true)) {
112 // The part of the heap spanned by the card contains references
113 // that point into the current collection set.
114 // We need to record the card pointer in the DirtyCardQueueSet
115 // that we use for such cards.
116 //
117 // The only time we care about recording cards that contain
118 // references that point into the collection set is during
119 // RSet updating while within an evacuation pause.
120 // In this case worker_i should be the id of a GC worker thread
121 assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
122 assert(worker_i < ParallelGCThreads,
123 err_msg("incorrect worker id: %u", worker_i));
124
125 into_cset_dcq->enqueue(card_ptr);
126 }
127 }
128 }
129 }
130 }
131 // The existing entries in the hot card cache, which were just refined
132 // above, are discarded prior to re-enabling the cache near the end of the GC.
133 }
134
// Reset the card hotness counts for the cards spanned by a single heap
// region, delegating to the counts table.
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}
138
// Reset the card hotness counts for all cards (whole-table clear).
void G1HotCardCache::reset_card_counts() {
  _card_counts.clear_all();
}
|