    // return it for immediate refining.
    return card_ptr;
  }
  // Otherwise, the card is hot.
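  // Claim the next slot in the ring buffer: the atomic increment hands each
  // inserting thread a unique index, and the mask wraps it into the table
  // (the mask is only a correct modulo because _hot_cache_size is a power
  // of two).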
  size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
  size_t masked_index = index & (_hot_cache_size - 1);
  CardValue* current_ptr = _hot_cache[masked_index];

  // Try to store the new card pointer into the cache. Compare-and-swap to
  // guard against the unlikely event of a race in which another card pointer
  // has already been written to the slot. In that case we return card_ptr
  // rather than starting over, which should be OK: card_ptr will likely be
  // the older card if this ever happens.
  CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
                                            &_hot_cache[masked_index],
                                            current_ptr);
  // On success, hand the evicted entry (possibly NULL) back to the caller
  // for refining; on failure, refine our own card instead.
  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}

void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");

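  // Workers drain the cache cooperatively: each Atomic::add below claims a
  // disjoint chunk of _hot_cache_par_chunk_size entries, so no entry is
  // refined twice.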
  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
                                 &_hot_cache_par_claimed_idx);
    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
    // The current worker has successfully claimed the chunk [start_idx..end_idx)
    // The last chunk may extend past the end of the cache, so clamp it.
    end_idx = MIN2(end_idx, _hot_cache_size);
    for (size_t i = start_idx; i < end_idx; i++) {
      CardValue* card_ptr = _hot_cache[i];
      if (card_ptr != NULL) {
        cl->do_card_ptr(card_ptr, worker_id);
      } else {
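        // Entries are filled contiguously from index 0, so a NULL entry
        // means the cache was never filled this far; the rest of the chunk
        // is empty as well.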
        break;
      }
    }
  }

  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}

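// Presumably called when hr is freed or reclaimed: dropping the per-card
// counts for the region keeps stale hotness data from carrying over if the
// region is reused.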
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}