    return card_ptr;
  }
  // Otherwise, the card is hot.
  size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
  size_t masked_index = index & (_hot_cache_size - 1);
  jbyte* current_ptr = _hot_cache[masked_index];

  // Try to store the new card pointer into the cache. Compare-and-swap to
  // guard against the unlikely event that another thread has already written
  // a different card pointer to this slot. In that case we return card_ptr
  // rather than starting over; this should be fine, since card_ptr will
  // likely be the older card anyway when/if that ever happens.
  jbyte* previous_ptr = (jbyte*)Atomic::cmpxchg_ptr(card_ptr,
                                                    &_hot_cache[masked_index],
                                                    current_ptr);
  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}
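
// ---------------------------------------------------------------------------
// Aside: a minimal standalone sketch of the slot-claim-and-CAS pattern used
// by insert() above, written with std::atomic instead of HotSpot's Atomic
// wrappers. All names here (SimpleHotCache, CACHE_SIZE) are hypothetical and
// exist only to illustrate the idea: claim a slot by atomically bumping a
// shared index, then CAS the new value in so that a racing write to the same
// (wrapped-around) slot is detected rather than silently lost.
#include <atomic>
#include <cstddef>

struct SimpleHotCache {
  static const size_t CACHE_SIZE = 1024;       // must be a power of two
  std::atomic<void*> _cache[CACHE_SIZE];       // circular buffer of entries
  std::atomic<size_t> _idx;                    // next slot to claim

  SimpleHotCache() : _idx(0) {
    for (size_t i = 0; i < CACHE_SIZE; i++) {
      _cache[i].store(NULL);
    }
  }

  // Returns the entry evicted from the claimed slot, or `entry` itself if a
  // racing thread won the slot first.
  void* insert(void* entry) {
    size_t index = _idx.fetch_add(1);
    size_t masked = index & (CACHE_SIZE - 1);  // cheap modulo, size is 2^k
    void* current = _cache[masked].load();
    if (_cache[masked].compare_exchange_strong(current, entry)) {
      return current;                          // evicted the old occupant
    }
    return entry;                              // lost the race; caller keeps it
  }
};
// ---------------------------------------------------------------------------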

void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");

  assert(_hot_cache != NULL, "Logic");
  assert(!use_cache(), "cache should be disabled");

  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
                                 &_hot_cache_par_claimed_idx);
    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
    // The current worker has successfully claimed the chunk [start_idx..end_idx)
    end_idx = MIN2(end_idx, _hot_cache_size);
    for (size_t i = start_idx; i < end_idx; i++) {
      jbyte* card_ptr = _hot_cache[i];
      if (card_ptr != NULL) {
        bool result = cl->do_card_ptr(card_ptr, worker_i);
        assert(result, "Closure should always return true");
      } else {
        // Entries are filled in index order, so a NULL entry means no later
        // slot in the cache is occupied either.
        break;
      }
    }
  }
}
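
// ---------------------------------------------------------------------------
// Aside: a minimal sketch of the lock-free chunk-claiming idiom that drain()
// uses, again with std::atomic standing in for Atomic::add. The names here
// (drain_parallel, CHUNK_SIZE, process) are hypothetical. Each worker claims
// a disjoint chunk [start, end) by bumping a shared claimed-index, so the
// workers partition the cache among themselves without any locking.
#include <algorithm>
#include <atomic>
#include <cstddef>

static const size_t CHUNK_SIZE = 32;

void drain_parallel(void* const* cache, size_t size,
                    std::atomic<size_t>& claimed,
                    void (*process)(void*)) {
  while (claimed.load() < size) {
    // Atomic::add returns the new value, while fetch_add returns the old
    // one, so the end of the claimed chunk is old value + chunk size.
    size_t end = claimed.fetch_add(CHUNK_SIZE) + CHUNK_SIZE;
    size_t start = end - CHUNK_SIZE;
    end = std::min(end, size);                 // clip the final chunk
    for (size_t i = start; i < end; i++) {
      if (cache[i] == NULL) {
        break;                                 // slots fill in order; done
      }
      process(cache[i]);
    }
  }
}
// ---------------------------------------------------------------------------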