Print this page
Split |
Close |
Expand all |
Collapse all |
--- old/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
+++ new/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
1 1 /*
2 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
24 24
25 25 #include "precompiled.hpp"
26 26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
27 27 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
28 28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
29 29 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
30 30 #include "gc_implementation/g1/g1RemSet.hpp"
31 31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
32 32 #include "memory/space.inline.hpp"
33 33 #include "runtime/atomic.hpp"
34 +#include "runtime/java.hpp"
34 35 #include "utilities/copy.hpp"
35 36
36 37 // Possible sizes for the card counts cache: odd primes that roughly double in size.
37 38 // (See jvmtiTagMap.cpp).
38 39 int ConcurrentG1Refine::_cc_cache_sizes[] = {
39 40 16381, 32771, 76831, 150001, 307261,
40 41 614563, 1228891, 2457733, 4915219, 9830479,
41 42 19660831, 39321619, 78643219, 157286461, -1
42 43 };
43 44
44 45 ConcurrentG1Refine::ConcurrentG1Refine() :
45 46 _card_counts(NULL), _card_epochs(NULL),
46 - _n_card_counts(0), _max_n_card_counts(0),
47 + _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
47 48 _cache_size_index(0), _expand_card_counts(false),
48 49 _hot_cache(NULL),
49 50 _def_use_cache(false), _use_cache(false),
50 51 _n_periods(0),
51 52 _threads(NULL), _n_threads(0)
52 53 {
53 54
54 55 // Ergonomically select initial concurrent refinement parameters
55 56 if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
56 57 FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
57 58 }
58 59 set_green_zone(G1ConcRefinementGreenZone);
59 60
60 61 if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
61 62 FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
62 63 }
63 64 set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
64 65
65 66 if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
66 67 FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
67 68 }
68 69 set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
69 70 _n_worker_threads = thread_num();
70 71 // We need one extra thread to do the young gen rset size sampling.
71 72 _n_threads = _n_worker_threads + 1;
72 73 reset_threshold_step();
73 74
74 75 _threads = NEW_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _n_threads);
75 76 int worker_id_offset = (int)DirtyCardQueueSet::num_par_ids();
76 77 ConcurrentG1RefineThread *next = NULL;
77 78 for (int i = _n_threads - 1; i >= 0; i--) {
78 79 ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
79 80 assert(t != NULL, "Conc refine should have been created");
80 81 assert(t->cg1r() == this, "Conc refine thread should refer to this");
81 82 _threads[i] = t;
82 83 next = t;
83 84 }
84 85 }
85 86
86 87 void ConcurrentG1Refine::reset_threshold_step() {
87 88 if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
88 89 _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
89 90 } else {
90 91 _thread_threshold_step = G1ConcRefinementThresholdStep;
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
91 92 }
92 93 }
93 94
94 95 int ConcurrentG1Refine::thread_num() {
95 96 return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
96 97 }
97 98
98 99 void ConcurrentG1Refine::init() {
99 100 if (G1ConcRSLogCacheSize > 0) {
100 101 _g1h = G1CollectedHeap::heap();
101 - _max_n_card_counts =
102 - (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);
102 +
103 + _max_cards = (unsigned)(_g1h->max_capacity() >> CardTableModRefBS::card_shift);
104 + _max_n_card_counts = (unsigned)(_max_cards * ((float)G1MaxHotCardCountSizePercent / 100.0));
103 105
104 106 size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
105 - guarantee(_max_n_card_counts < max_card_num, "card_num representation");
107 + guarantee(_max_cards < max_card_num, "card_num representation");
106 108
107 - int desired = _max_n_card_counts / InitialCacheFraction;
108 - for (_cache_size_index = 0;
109 - _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
110 - if (_cc_cache_sizes[_cache_size_index] >= desired) break;
111 - }
112 - _cache_size_index = MAX2(0, (_cache_size_index - 1));
109 + int desired = _max_cards / InitialCacheFraction;
113 110
114 - int initial_size = _cc_cache_sizes[_cache_size_index];
115 - if (initial_size < 0) initial_size = _max_n_card_counts;
111 + // Find the index into cache size array that is of a size that's
112 + // large enough to hold "desired".
113 + assert(_n_card_counts == 0, "pre-condition");
114 + assert(_max_n_card_counts > 0, "pre-condition");
116 115
117 - // Make sure we don't go bigger than we will ever need
118 - _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);
116 + int index;
117 + for (index = 0; _cc_cache_sizes[index] >= 0; index++) {
118 + if (_cc_cache_sizes[index] >= desired) break;
119 + }
120 + assert(index < MAX_CC_CACHE_INDEX, "post condition");
119 121
120 - _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
121 - _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
122 + // _cc_cache_sizes[index - 1] <= desired < _cc_cache_sizes[index]
123 + index = MAX2(0, (index - 1));
124 +
125 + if (!expand_card_count_cache(index)) {
126 + // Allocation was unsuccessful - exit
127 + vm_exit_during_initialization("Could not reserve enough space for card count cache");
128 + }
129 + assert(_n_card_counts > 0, "post-condition");
130 + assert(_cache_size_index == index, "post-condition");
122 131
123 132 Copy::fill_to_bytes(&_card_counts[0],
124 133 _n_card_counts * sizeof(CardCountCacheEntry));
125 134 Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
126 135
127 136 ModRefBarrierSet* bs = _g1h->mr_bs();
128 137 guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
129 138 _ct_bs = (CardTableModRefBS*)bs;
130 139 _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
131 140
132 141 _def_use_cache = true;
133 142 _use_cache = true;
134 143 _hot_cache_size = (1 << G1ConcRSLogCacheSize);
135 144 _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
136 145 _n_hot = 0;
137 146 _hot_cache_idx = 0;
138 147
139 148 // For refining the cards in the hot cache in parallel
140 149 int n_workers = (ParallelGCThreads > 0 ?
141 150 _g1h->workers()->total_workers() : 1);
142 151 _hot_cache_par_chunk_size = MAX2(1, _hot_cache_size / n_workers);
143 152 _hot_cache_par_claimed_idx = 0;
144 153 }
145 154 }
146 155
147 156 void ConcurrentG1Refine::stop() {
148 157 if (_threads != NULL) {
149 158 for (int i = 0; i < _n_threads; i++) {
150 159 _threads[i]->stop();
151 160 }
152 161 }
153 162 }
154 163
155 164 void ConcurrentG1Refine::reinitialize_threads() {
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
156 165 reset_threshold_step();
157 166 if (_threads != NULL) {
158 167 for (int i = 0; i < _n_threads; i++) {
159 168 _threads[i]->initialize();
160 169 }
161 170 }
162 171 }
163 172
164 173 ConcurrentG1Refine::~ConcurrentG1Refine() {
165 174 if (G1ConcRSLogCacheSize > 0) {
175 + // We access the allocation routines directly for
176 + // the counts and epochs.
166 177 assert(_card_counts != NULL, "Logic");
167 - FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
178 + os::free(_card_counts);
168 179 assert(_card_epochs != NULL, "Logic");
169 - FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
180 + os::free(_card_epochs);
181 +
170 182 assert(_hot_cache != NULL, "Logic");
171 183 FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
172 184 }
173 185 if (_threads != NULL) {
174 186 for (int i = 0; i < _n_threads; i++) {
175 187 delete _threads[i];
176 188 }
177 189 FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
178 190 }
179 191 }
180 192
181 193 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
182 194 if (_threads != NULL) {
183 195 for (int i = 0; i < _n_threads; i++) {
184 196 tc->do_thread(_threads[i]);
185 197 }
186 198 }
187 199 }
188 200
189 201 bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {
190 202 HeapWord* start = _ct_bs->addr_for(card_ptr);
191 203 HeapRegion* r = _g1h->heap_region_containing(start);
192 204 if (r != NULL && r->is_young()) {
193 205 return true;
194 206 }
195 207 // This card is not associated with a heap region
196 208 // so can't be young.
197 209 return false;
198 210 }
199 211
200 212 jbyte* ConcurrentG1Refine::add_card_count(jbyte* card_ptr, int* count, bool* defer) {
201 213 unsigned new_card_num = ptr_2_card_num(card_ptr);
202 214 unsigned bucket = hash(new_card_num);
203 215 assert(0 <= bucket && bucket < _n_card_counts, "Bounds");
204 216
205 217 CardCountCacheEntry* count_ptr = &_card_counts[bucket];
206 218 CardEpochCacheEntry* epoch_ptr = &_card_epochs[bucket];
207 219
208 220 // We have to construct a new entry if we haven't updated the counts
209 221 // during the current period, or if the count was updated for a
210 222 // different card number.
211 223 unsigned int new_epoch = (unsigned int) _n_periods;
212 224 julong new_epoch_entry = make_epoch_entry(new_card_num, new_epoch);
213 225
214 226 while (true) {
215 227 // Fetch the previous epoch value
216 228 julong prev_epoch_entry = epoch_ptr->_value;
217 229 julong cas_res;
218 230
219 231 if (extract_epoch(prev_epoch_entry) != new_epoch) {
220 232 // This entry has not yet been updated during this period.
221 233 // Note: we update the epoch value atomically to ensure
222 234 // that there is only one winner that updates the cached
223 235 // card_ptr value even though all the refine threads share
224 236 // the same epoch value.
225 237
226 238 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
227 239 (volatile jlong*)&epoch_ptr->_value,
228 240 (jlong) prev_epoch_entry);
229 241
230 242 if (cas_res == prev_epoch_entry) {
231 243 // We have successfully won the race to update the
232 244 // epoch and card_num value. Make it look like the
233 245 // count and eviction count were previously cleared.
234 246 count_ptr->_count = 1;
235 247 count_ptr->_evict_count = 0;
236 248 *count = 0;
237 249 // We can defer the processing of card_ptr
238 250 *defer = true;
239 251 return card_ptr;
240 252 }
241 253 // We did not win the race to update the epoch field, so some other
242 254 // thread must have done it. The value that gets returned by CAS
243 255 // should be the new epoch value.
244 256 assert(extract_epoch(cas_res) == new_epoch, "unexpected epoch");
245 257 // We could 'continue' here or just re-read the previous epoch value
246 258 prev_epoch_entry = epoch_ptr->_value;
247 259 }
248 260
249 261 // The epoch entry for card_ptr has been updated during this period.
250 262 unsigned old_card_num = extract_card_num(prev_epoch_entry);
251 263
252 264 // The card count that will be returned to caller
253 265 *count = count_ptr->_count;
254 266
255 267 // Are we updating the count for the same card?
256 268 if (new_card_num == old_card_num) {
257 269 // Same card - just update the count. We could have more than one
258 270 // thread racing to update count for the current card. It should be
259 271 // OK not to use a CAS as the only penalty should be some missed
260 272 // increments of the count which delays identifying the card as "hot".
261 273
262 274 if (*count < max_jubyte) count_ptr->_count++;
263 275 // We can defer the processing of card_ptr
264 276 *defer = true;
265 277 return card_ptr;
266 278 }
267 279
268 280 // Different card - evict old card info
269 281 if (count_ptr->_evict_count < max_jubyte) count_ptr->_evict_count++;
270 282 if (count_ptr->_evict_count > G1CardCountCacheExpandThreshold) {
271 283 // Trigger a resize the next time we clear
272 284 _expand_card_counts = true;
273 285 }
274 286
275 287 cas_res = (julong) Atomic::cmpxchg((jlong) new_epoch_entry,
276 288 (volatile jlong*)&epoch_ptr->_value,
277 289 (jlong) prev_epoch_entry);
278 290
279 291 if (cas_res == prev_epoch_entry) {
280 292 // We successfully updated the card num value in the epoch entry
281 293 count_ptr->_count = 0; // initialize counter for new card num
282 294 jbyte* old_card_ptr = card_num_2_ptr(old_card_num);
283 295
284 296 // Even though the region containing the card at old_card_num was not
285 297 // in the young list when old_card_num was recorded in the epoch
286 298 // cache it could have been added to the free list and subsequently
287 299 // added to the young list in the intervening time. See CR 6817995.
288 300 // We do not deal with this case here - it will be handled in
289 301 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
290 302 // determined that the region containing the card has been allocated
291 303 // to, and it's safe to check the young type of the region.
292 304
293 305 // We do not want to defer processing of card_ptr in this case
294 306 // (we need to refine old_card_ptr and card_ptr)
295 307 *defer = false;
296 308 return old_card_ptr;
297 309 }
298 310 // Someone else beat us - try again.
299 311 }
300 312 }
301 313
302 314 jbyte* ConcurrentG1Refine::cache_insert(jbyte* card_ptr, bool* defer) {
303 315 int count;
304 316 jbyte* cached_ptr = add_card_count(card_ptr, &count, defer);
305 317 assert(cached_ptr != NULL, "bad cached card ptr");
306 318
307 319 // We've just inserted a card pointer into the card count cache
308 320 // and got back the card that we just inserted or (evicted) the
309 321 // previous contents of that count slot.
310 322
311 323 // The card we got back could be in a young region. When the
312 324 // returned card (if evicted) was originally inserted, we had
313 325 // determined that its containing region was not young. However
314 326 // it is possible for the region to be freed during a cleanup
315 327 // pause, then reallocated and tagged as young which will result
316 328 // in the returned card residing in a young region.
317 329 //
318 330 // We do not deal with this case here - the change from non-young
319 331 // to young could be observed at any time - it will be handled in
320 332 // HeapRegion::oops_on_card_seq_iterate_careful after it has been
321 333 // determined that the region containing the card has been allocated
322 334 // to.
323 335
324 336 // The card pointer we obtained from card count cache is not hot
325 337 // so do not store it in the cache; return it for immediate
326 338 // refining.
327 339 if (count < G1ConcRSHotCardLimit) {
328 340 return cached_ptr;
329 341 }
330 342
331 343 // Otherwise, the pointer we got from the _card_counts cache is hot.
332 344 jbyte* res = NULL;
333 345 MutexLockerEx x(HotCardCache_lock, Mutex::_no_safepoint_check_flag);
334 346 if (_n_hot == _hot_cache_size) {
335 347 res = _hot_cache[_hot_cache_idx];
336 348 _n_hot--;
337 349 }
338 350 // Now _n_hot < _hot_cache_size, and we can insert at _hot_cache_idx.
339 351 _hot_cache[_hot_cache_idx] = cached_ptr;
340 352 _hot_cache_idx++;
341 353 if (_hot_cache_idx == _hot_cache_size) _hot_cache_idx = 0;
342 354 _n_hot++;
343 355
344 356 // The card obtained from the hot card cache could be in a young
345 357 // region. See above on how this can happen.
346 358
347 359 return res;
348 360 }
349 361
350 362 void ConcurrentG1Refine::clean_up_cache(int worker_i,
351 363 G1RemSet* g1rs,
352 364 DirtyCardQueue* into_cset_dcq) {
353 365 assert(!use_cache(), "cache should be disabled");
354 366 int start_idx;
355 367
356 368 while ((start_idx = _hot_cache_par_claimed_idx) < _n_hot) { // read once
357 369 int end_idx = start_idx + _hot_cache_par_chunk_size;
358 370
359 371 if (start_idx ==
360 372 Atomic::cmpxchg(end_idx, &_hot_cache_par_claimed_idx, start_idx)) {
361 373 // The current worker has successfully claimed the chunk [start_idx..end_idx)
362 374 end_idx = MIN2(end_idx, _n_hot);
363 375 for (int i = start_idx; i < end_idx; i++) {
364 376 jbyte* entry = _hot_cache[i];
365 377 if (entry != NULL) {
366 378 if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
367 379 // 'entry' contains references that point into the current
368 380 // collection set. We need to record 'entry' in the DCQS
369 381 // that's used for that purpose.
370 382 //
371 383 // The only time we care about recording cards that contain
372 384 // references that point into the collection set is during
373 385 // RSet updating while within an evacuation pause.
374 386 // In this case worker_i should be the id of a GC worker thread
↓ open down ↓ |
195 lines elided |
↑ open up ↑ |
375 387 assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
376 388 assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
377 389 into_cset_dcq->enqueue(entry);
378 390 }
379 391 }
380 392 }
381 393 }
382 394 }
383 395 }
384 396
385 -void ConcurrentG1Refine::expand_card_count_cache() {
397 +// The arrays used to hold the card counts and the epochs must have
398 +// a 1:1 correspondence. Hence they are allocated and freed together.
399 +// Returns true if the allocations of both the counts and epochs
400 +// were successful; false otherwise.
401 +bool ConcurrentG1Refine::allocate_card_count_cache(int n,
402 + CardCountCacheEntry** counts,
403 + CardEpochCacheEntry** epochs) {
404 + assert(*counts == NULL, "out param");
405 + assert(*epochs == NULL, "out param");
406 +
407 + size_t counts_size = n * sizeof(CardCountCacheEntry);
408 + size_t epochs_size = n * sizeof(CardEpochCacheEntry);
409 +
410 + *counts = (CardCountCacheEntry*) os::malloc(counts_size);
411 + if (*counts == NULL) {
412 + // allocation was unsuccessful
413 + return false;
414 + }
415 +
416 + *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size);
417 + if (*epochs == NULL) {
418 + // allocation was unsuccessful - free counts array
419 + assert(*counts != NULL, "must be");
420 + os::free(*counts);
421 + *counts = NULL;
422 + return false;
423 + }
424 +
425 + // We successfully allocated both counts and epochs
426 + return true;
427 +}
428 +
429 +// Returns true if the card counts/epochs cache was
430 +// successfully expanded; false otherwise.
431 +bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
432 + // Can we expand the card count and epoch tables?
386 433 if (_n_card_counts < _max_n_card_counts) {
387 - int new_idx = _cache_size_index+1;
388 - int new_size = _cc_cache_sizes[new_idx];
389 - if (new_size < 0) new_size = _max_n_card_counts;
434 + int cache_size = (cache_size_idx < MAX_CC_CACHE_INDEX ? _cc_cache_sizes[cache_size_idx]
435 + : _max_n_card_counts);
436 + if (cache_size < 0) cache_size = _max_n_card_counts;
390 437
391 438 // Make sure we don't go bigger than we will ever need
392 - new_size = MIN2((unsigned) new_size, _max_n_card_counts);
439 + cache_size = MIN2((unsigned) cache_size, _max_n_card_counts);
440 +
441 + // Should we expand the card count and card epoch tables?
442 + if (cache_size > (int)_n_card_counts) {
443 + // We have been asked to allocate new, larger, arrays for
444 + // the card counts and the epochs. Attempt the allocation
445 + // of both before we free the existing arrays in case
446 + // the allocation is unsuccessful...
447 + CardCountCacheEntry* counts = NULL;
448 + CardEpochCacheEntry* epochs = NULL;
449 +
450 + if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
451 + // Allocation was successful.
452 + // We can just free the old arrays; we're
453 + // not interested in preserving the contents
454 + if (_card_counts != NULL) os::free(_card_counts);
455 + if (_card_epochs != NULL) os::free(_card_epochs);
456 +
457 + // Cache the size of the arrays and the index that got us there.
458 + _n_card_counts = cache_size;
459 + _cache_size_index = cache_size_idx;
393 460
394 - // Expand the card count and card epoch tables
395 - if (new_size > (int)_n_card_counts) {
396 - // We can just free and allocate a new array as we're
397 - // not interested in preserving the contents
398 - assert(_card_counts != NULL, "Logic!");
399 - assert(_card_epochs != NULL, "Logic!");
400 - FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
401 - FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
402 - _n_card_counts = new_size;
403 - _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
404 - _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
405 - _cache_size_index = new_idx;
461 + _card_counts = counts;
462 + _card_epochs = epochs;
463 +
464 + // We successfully allocated/expanded the caches.
465 + return true;
466 + }
406 467 }
407 468 }
469 +
470 + // We did not successfully expand the caches.
471 + return false;
408 472 }
409 473
410 474 void ConcurrentG1Refine::clear_and_record_card_counts() {
411 475 if (G1ConcRSLogCacheSize == 0) return;
412 476
413 477 #ifndef PRODUCT
414 478 double start = os::elapsedTime();
415 479 #endif
416 480
417 481 if (_expand_card_counts) {
418 - expand_card_count_cache();
482 + int new_idx = _cache_size_index + 1;
483 +
484 + if (expand_card_count_cache(new_idx)) {
485 + // Allocation was successful and _n_card_counts has
486 + // been updated to the new size. We only need to clear
487 + // the epochs so we don't read a bogus epoch value
488 + // when inserting a card into the hot card cache.
489 + Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
490 + }
419 491 _expand_card_counts = false;
420 - // Only need to clear the epochs.
421 - Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
422 492 }
423 493
424 494 int this_epoch = (int) _n_periods;
425 495 assert((this_epoch+1) <= max_jint, "to many periods");
426 496 // Update epoch
427 497 _n_periods++;
428 498
429 499 #ifndef PRODUCT
430 500 double elapsed = os::elapsedTime() - start;
431 501 _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
432 502 #endif
433 503 }
434 504
435 505 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
436 506 for (int i = 0; i < _n_threads; ++i) {
437 507 _threads[i]->print_on(st);
438 508 st->cr();
439 509 }
440 510 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX