 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "runtime/atomic.inline.hpp"

G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
  _g1h(g1h), _hot_cache(NULL), _use_cache(false), _card_counts(g1h) {}

void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
  if (default_use_cache()) {
    _use_cache = true;

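    // The cache size is 2^G1ConcRSLogCacheSize, so an index can be reduced
    // modulo the size with a simple mask (see insert()).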
    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
    _hot_cache = ArrayAllocator<jbyte*, mtGC>::allocate(_hot_cache_size);

    reset_hot_cache_internal();

    // For refining the cards in the hot cache in parallel
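    // during drain(): workers repeatedly claim chunks of ClaimChunkSize
    // entries by atomically bumping _hot_cache_par_claimed_idx.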
    _hot_cache_par_chunk_size = ClaimChunkSize;
    _hot_cache_par_claimed_idx = 0;

    _card_counts.initialize(card_counts_storage);
  }
}

G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "Logic");
    ArrayAllocator<jbyte*, mtGC>::free(_hot_cache, _hot_cache_size);
    _hot_cache = NULL;
  }
}
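// Inserts card_ptr into the cache if it is considered hot. Returns a card
// that must be refined immediately instead: card_ptr itself if it is not
// hot (or lost a race), the entry it evicted, or NULL if the claimed slot
// was still empty.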
jbyte* G1HotCardCache::insert(jbyte* card_ptr) {
  uint count = _card_counts.add_card_count(card_ptr);
  if (!_card_counts.is_hot(count)) {
    // The card is not hot so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }
  // Otherwise, the card is hot.
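  // Claim a slot in the circular cache by atomically bumping the global
  // index; the mask works because _hot_cache_size is a power of two.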
  size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
  size_t masked_index = index & (_hot_cache_size - 1);
  jbyte* current_ptr = _hot_cache[masked_index];

  // Try to store the new card pointer into the cache. Compare-and-swap to
  // guard against the unlikely event of a race in which another card pointer
  // has already been written to the cache. In that case we return card_ptr
  // rather than starting over; this should be OK, since if it ever happens
  // card_ptr will likely be the older card anyway.
  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
                                                    &_hot_cache[masked_index],
                                                    current_ptr);
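  // On success return the evicted entry (NULL if the slot was empty) so the
  // caller can refine it; on failure return card_ptr itself.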
  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}

void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");
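  // With the cache disabled (see the assert above) no concurrent insert()
  // can write to _hot_cache while the workers iterate over it here.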

  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
                                 &_hot_cache_par_claimed_idx);
    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
    // The current worker has successfully claimed the chunk [start_idx..end_idx)
    end_idx = MIN2(end_idx, _hot_cache_size);
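    // The claim may extend past the end of the cache; clamp end_idx so the
    // final (possibly partial) chunk stays in bounds.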
    for (size_t i = start_idx; i < end_idx; i++) {
      jbyte* card_ptr = _hot_cache[i];
      if (card_ptr != NULL) {
        bool result = cl->do_card_ptr(card_ptr, worker_i);
        assert(result, "Closure should always return true");
      } else {
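        // A NULL entry means the cache never filled up this far: entries are
        // inserted at strictly increasing indices until the cache wraps, so
        // everything from here on is empty as well.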
        break;
      }
    }
  }

  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}

void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}

void G1HotCardCache::reset_card_counts() {
  _card_counts.clear_all();
}