          --- old/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
          +++ new/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp
(23 lines elided)
  24   24  
  25   25  #include "precompiled.hpp"
  26   26  #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27   27  #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  28   28  #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  29   29  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  30   30  #include "gc_implementation/g1/g1RemSet.hpp"
  31   31  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  32   32  #include "memory/space.inline.hpp"
  33   33  #include "runtime/atomic.hpp"
       34 +#include "runtime/java.hpp"
  34   35  #include "utilities/copy.hpp"
  35   36  
  36   37  // Possible sizes for the card counts cache: odd primes that roughly double in size.
  37   38  // (See jvmtiTagMap.cpp).
  38   39  int ConcurrentG1Refine::_cc_cache_sizes[] = {
  39   40          16381,    32771,    76831,    150001,   307261,
  40   41         614563,  1228891,  2457733,   4915219,  9830479,
  41   42       19660831, 39321619, 78643219, 157286461,       -1
  42   43    };
  43   44  
  44   45  ConcurrentG1Refine::ConcurrentG1Refine() :
  45   46    _card_counts(NULL), _card_epochs(NULL),
  46      -  _n_card_counts(0), _max_n_card_counts(0),
       47 +  _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
  47   48    _cache_size_index(0), _expand_card_counts(false),
  48   49    _hot_cache(NULL),
  49   50    _def_use_cache(false), _use_cache(false),
  50   51    _n_periods(0),
  51   52    _threads(NULL), _n_threads(0)
  52   53  {
  53   54  
  54   55    // Ergonomically select initial concurrent refinement parameters
  55   56    if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
  56   57      FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
(34 lines elided)
  91   92    }
  92   93  }
  93   94  
  94   95  int ConcurrentG1Refine::thread_num() {
  95   96    return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
  96   97  }
  97   98  
  98   99  void ConcurrentG1Refine::init() {
  99  100    if (G1ConcRSLogCacheSize > 0) {
 100  101      _g1h = G1CollectedHeap::heap();
 101      -    _max_n_card_counts =
 102      -      (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);
      102 +
      103 +    _max_cards = (unsigned)(_g1h->max_capacity() >> CardTableModRefBS::card_shift);
      104 +    _max_n_card_counts = (unsigned)(_max_cards * ((float)G1MaxHotCardCountSizePercent / 100.0));
 103  105  
 104  106      size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
 105      -    guarantee(_max_n_card_counts < max_card_num, "card_num representation");
      107 +    guarantee(_max_cards < max_card_num, "card_num representation");
 106  108  
 107      -    int desired = _max_n_card_counts / InitialCacheFraction;
 108      -    for (_cache_size_index = 0;
 109      -              _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
 110      -      if (_cc_cache_sizes[_cache_size_index] >= desired) break;
 111      -    }
 112      -    _cache_size_index = MAX2(0, (_cache_size_index - 1));
      109 +    int desired = _max_cards / InitialCacheFraction;
 113  110  
 114      -    int initial_size = _cc_cache_sizes[_cache_size_index];
 115      -    if (initial_size < 0) initial_size = _max_n_card_counts;
      111 +    // Find the index into cache size array that is of a size that's
      112 +    // large enough to hold "desired".
      113 +    assert(_n_card_counts == 0, "pre-condition");
      114 +    assert(_max_n_card_counts > 0, "pre-condition");
 116  115  
 117      -    // Make sure we don't go bigger than we will ever need
 118      -    _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);
      116 +    int index;
      117 +    for (index = 0; _cc_cache_sizes[index] >= 0; index++) {
      118 +      if (_cc_cache_sizes[index] >= desired) break;
      119 +    }
       120 +    assert(index < MAX_CC_CACHE_INDEX, "post-condition");
 119  121  
 120      -    _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
 121      -    _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
      122 +    // _cc_cache_sizes[index - 1] <= desired < _cc_cache_sizes[index]
      123 +    index = MAX2(0, (index - 1));
      124 +
      125 +    if (!expand_card_count_cache(index)) {
      126 +      // Allocation was unsuccessful - exit
      127 +      vm_exit_during_initialization("Could not reserve enough space for card count cache");
      128 +    }
      129 +    assert(_n_card_counts > 0, "post-condition");
      130 +    assert(_cache_size_index == index, "post-condition");
 122  131  
 123  132      Copy::fill_to_bytes(&_card_counts[0],
 124  133                          _n_card_counts * sizeof(CardCountCacheEntry));
 125  134      Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
 126  135  
 127  136      ModRefBarrierSet* bs = _g1h->mr_bs();
 128  137      guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
 129  138      _ct_bs = (CardTableModRefBS*)bs;
 130  139      _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 131  140  
(24 lines elided)
 156  165    reset_threshold_step();
 157  166    if (_threads != NULL) {
 158  167      for (int i = 0; i < _n_threads; i++) {
 159  168        _threads[i]->initialize();
 160  169      }
 161  170    }
 162  171  }
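
A rough worked example of the new sizing arithmetic in init() above, assuming a 1 GB maximum heap, 512-byte cards (card_shift == 9), and purely illustrative values of 25 for G1MaxHotCardCountSizePercent and 64 for InitialCacheFraction (assumed here for the sake of the example, not taken from the flags' defaults):

    // Hypothetical numbers only, to show how the initial cache index is chosen.
    size_t   max_capacity = (size_t)1024 * 1024 * 1024;     // 1 GB heap
    unsigned max_cards    = (unsigned)(max_capacity >> 9);  // 2,097,152 cards
    unsigned max_counts   = (unsigned)(max_cards * 0.25);   //   524,288 cacheable cards
    int      desired      = max_cards / 64;                 //    32,768 entries wanted

    // Scanning _cc_cache_sizes: 16381 < 32768, but 32771 >= 32768, so the loop
    // stops at index 1 and MAX2(0, index - 1) selects index 0, i.e. an initial
    // cache of 16381 count/epoch entries that is expanded later on demand.
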
 163  172  
 164  173  ConcurrentG1Refine::~ConcurrentG1Refine() {
 165  174    if (G1ConcRSLogCacheSize > 0) {
      175 +    // We access the allocation routines directly for
      176 +    // the counts and epochs.
 166  177      assert(_card_counts != NULL, "Logic");
 167      -    FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
      178 +    os::free(_card_counts);
 168  179      assert(_card_epochs != NULL, "Logic");
 169      -    FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
      180 +    os::free(_card_epochs);
      181 +
 170  182      assert(_hot_cache != NULL, "Logic");
 171  183      FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
 172  184    }
 173  185    if (_threads != NULL) {
 174  186      for (int i = 0; i < _n_threads; i++) {
 175  187        delete _threads[i];
 176  188      }
 177  189      FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
 178  190    }
 179  191  }
(195 lines elided)
 375  387              assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
 376  388              assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
 377  389              into_cset_dcq->enqueue(entry);
 378  390            }
 379  391          }
 380  392        }
 381  393      }
 382  394    }
 383  395  }
 384  396  
 385      -void ConcurrentG1Refine::expand_card_count_cache() {
      397 +// The arrays used to hold the card counts and the epochs must have
       398 +// a 1:1 correspondence. Hence they are allocated and freed together.
      399 +// Returns true if the allocations of both the counts and epochs
      400 +// were successful; false otherwise.
      401 +bool ConcurrentG1Refine::allocate_card_count_cache(int n,
      402 +                                                   CardCountCacheEntry** counts,
      403 +                                                   CardEpochCacheEntry** epochs) {
      404 +  assert(*counts == NULL, "out param");
      405 +  assert(*epochs == NULL, "out param");
      406 +
      407 +  size_t counts_size = n * sizeof(CardCountCacheEntry);
      408 +  size_t epochs_size = n * sizeof(CardEpochCacheEntry);
      409 +
      410 +  *counts = (CardCountCacheEntry*) os::malloc(counts_size);
      411 +  if (*counts == NULL) {
      412 +    // allocation was unsuccessful
      413 +    return false;
      414 +  }
      415 +
      416 +  *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size);
      417 +  if (*epochs == NULL) {
      418 +    // allocation was unsuccessful - free counts array
      419 +    assert(*counts != NULL, "must be");
      420 +    os::free(*counts);
      421 +    *counts = NULL;
      422 +    return false;
      423 +  }
      424 +
      425 +  // We successfully allocated both counts and epochs
      426 +  return true;
      427 +}
      428 +
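
A minimal caller-side sketch for the helper above (the caller is hypothetical, not part of the change), illustrating its contract: both out parameters must start out NULL, a failed call leaks nothing, and a successful call leaves the caller responsible for releasing both arrays with os::free, since they come straight from os::malloc:

    CardCountCacheEntry* counts = NULL;
    CardEpochCacheEntry* epochs = NULL;

    if (allocate_card_count_cache(16381, &counts, &epochs)) {
      // Both arrays are live; when done, release them with os::free
      // (not FREE_C_HEAP_ARRAY, which would not match os::malloc).
      os::free(counts);
      os::free(epochs);
    } else {
      // Nothing to clean up: if only the epochs allocation failed, the
      // helper already freed the counts array and reset it to NULL.
    }
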
      429 +// Returns true if the card counts/epochs cache was
      430 +// successfully expanded; false otherwise.
      431 +bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
      432 +  // Can we expand the card count and epoch tables?
 386  433    if (_n_card_counts < _max_n_card_counts) {
 387      -    int new_idx = _cache_size_index+1;
 388      -    int new_size = _cc_cache_sizes[new_idx];
 389      -    if (new_size < 0) new_size = _max_n_card_counts;
      434 +    int cache_size = (cache_size_idx < MAX_CC_CACHE_INDEX ? _cc_cache_sizes[cache_size_idx]
      435 +                                                          : _max_n_card_counts);
      436 +    if (cache_size < 0) cache_size = _max_n_card_counts;
 390  437  
 391  438      // Make sure we don't go bigger than we will ever need
 392      -    new_size = MIN2((unsigned) new_size, _max_n_card_counts);
      439 +    cache_size = MIN2((unsigned) cache_size, _max_n_card_counts);
      440 +
      441 +    // Should we expand the card count and card epoch tables?
      442 +    if (cache_size > (int)_n_card_counts) {
      443 +      // We have been asked to allocate new, larger, arrays for
      444 +      // the card counts and the epochs. Attempt the allocation
      445 +      // of both before we free the existing arrays in case
      446 +      // the allocation is unsuccessful...
      447 +      CardCountCacheEntry* counts = NULL;
      448 +      CardEpochCacheEntry* epochs = NULL;
      449 +
      450 +      if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
      451 +        // Allocation was successful.
      452 +        // We can just free the old arrays; we're
      453 +        // not interested in preserving the contents
      454 +        if (_card_counts != NULL) os::free(_card_counts);
      455 +        if (_card_epochs != NULL) os::free(_card_epochs);
      456 +
      457 +        // Cache the size of the arrays and the index that got us there.
      458 +        _n_card_counts = cache_size;
      459 +        _cache_size_index = cache_size_idx;
 393  460  
 394      -    // Expand the card count and card epoch tables
 395      -    if (new_size > (int)_n_card_counts) {
 396      -      // We can just free and allocate a new array as we're
 397      -      // not interested in preserving the contents
 398      -      assert(_card_counts != NULL, "Logic!");
 399      -      assert(_card_epochs != NULL, "Logic!");
 400      -      FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
 401      -      FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
 402      -      _n_card_counts = new_size;
 403      -      _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
 404      -      _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
 405      -      _cache_size_index = new_idx;
      461 +        _card_counts = counts;
      462 +        _card_epochs = epochs;
      463 +
      464 +        // We successfully allocated/expanded the caches.
      465 +        return true;
      466 +      }
 406  467      }
 407  468    }
      469 +
      470 +  // We did not successfully expand the caches.
      471 +  return false;
 408  472  }
 409  473  
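A sketch of how the rewritten expansion routine behaves across repeated requests, assuming (as clear_and_record_card_counts() does below) that each request passes _cache_size_index + 1; the driver loop itself is hypothetical and only illustrates the failure behaviour:

    // Grow the cache until the maximum is reached or an allocation fails.
    while (_n_card_counts < _max_n_card_counts) {
      if (!expand_card_count_cache(_cache_size_index + 1)) {
        // The new, larger arrays could not be allocated. The previous
        // _card_counts/_card_epochs arrays are untouched, so refinement
        // simply carries on with the smaller cache.
        break;
      }
      // On success _card_counts, _card_epochs, _n_card_counts and
      // _cache_size_index now describe the larger arrays.
    }
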
 410  474  void ConcurrentG1Refine::clear_and_record_card_counts() {
 411  475    if (G1ConcRSLogCacheSize == 0) return;
 412  476  
 413  477  #ifndef PRODUCT
 414  478    double start = os::elapsedTime();
 415  479  #endif
 416  480  
 417  481    if (_expand_card_counts) {
 418      -    expand_card_count_cache();
      482 +    int new_idx = _cache_size_index + 1;
      483 +
      484 +    if (expand_card_count_cache(new_idx)) {
      485 +      // Allocation was successful and  _n_card_counts has
      486 +      // been updated to the new size. We only need to clear
      487 +      // the epochs so we don't read a bogus epoch value
      488 +      // when inserting a card into the hot card cache.
      489 +      Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
      490 +    }
 419  491      _expand_card_counts = false;
 420      -    // Only need to clear the epochs.
 421      -    Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
 422  492    }
 423  493  
 424  494    int this_epoch = (int) _n_periods;
  425  495    assert((this_epoch+1) <= max_jint, "too many periods");
 426  496    // Update epoch
 427  497    _n_periods++;
 428  498  
 429  499  #ifndef PRODUCT
 430  500    double elapsed = os::elapsedTime() - start;
 431  501    _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
 432  502  #endif
 433  503  }
 434  504  
 435  505  void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
 436  506    for (int i = 0; i < _n_threads; ++i) {
 437  507      _threads[i]->print_on(st);
 438  508      st->cr();
 439  509    }
 440  510  }