src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp

Print this page




  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  30 #include "gc_implementation/g1/g1RemSet.hpp"
  31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  32 #include "memory/space.inline.hpp"
  33 #include "runtime/atomic.hpp"

  34 #include "utilities/copy.hpp"
  35 
  36 // Possible sizes for the card counts cache: odd primes that roughly double in size.
  37 // (See jvmtiTagMap.cpp).
int ConcurrentG1Refine::_cc_cache_sizes[] = {
  // Odd primes, each roughly double its predecessor; the trailing -1
  // is a sentinel marking the end of the table.
        16381,    32771,    76831,    150001,   307261,
       614563,  1228891,  2457733,   4915219,  9830479,
     19660831, 39321619, 78643219, 157286461,       -1
  };
  43 
  44 ConcurrentG1Refine::ConcurrentG1Refine() :
  45   _card_counts(NULL), _card_epochs(NULL),
  46   _n_card_counts(0), _max_n_card_counts(0),
  47   _cache_size_index(0), _expand_card_counts(false),
  48   _hot_cache(NULL),
  49   _def_use_cache(false), _use_cache(false),
  50   _n_periods(0),
  51   _threads(NULL), _n_threads(0)
  52 {
  53 
  54   // Ergomonically select initial concurrent refinement parameters
  55   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
  56     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
  57   }
  58   set_green_zone(G1ConcRefinementGreenZone);
  59 
  60   if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
  61     FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
  62   }
  63   set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
  64 
  65   if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
  66     FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);


  81     _threads[i] = t;
  82     next = t;
  83   }
  84 }
  85 
  86 void ConcurrentG1Refine::reset_threshold_step() {
  87   if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
  88     _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
  89   } else {
  90     _thread_threshold_step = G1ConcRefinementThresholdStep;
  91   }
  92 }
  93 
  94 int ConcurrentG1Refine::thread_num() {
  95   return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
  96 }
  97 
  98 void ConcurrentG1Refine::init() {
  99   if (G1ConcRSLogCacheSize > 0) {
 100     _g1h = G1CollectedHeap::heap();
 101     _max_n_card_counts =
 102       (unsigned) (_g1h->max_capacity() >> CardTableModRefBS::card_shift);

 103 
 104     size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
 105     guarantee(_max_n_card_counts < max_card_num, "card_num representation");
 106 
 107     int desired = _max_n_card_counts / InitialCacheFraction;
 108     for (_cache_size_index = 0;
 109               _cc_cache_sizes[_cache_size_index] >= 0; _cache_size_index++) {
 110       if (_cc_cache_sizes[_cache_size_index] >= desired) break;
 111     }
 112     _cache_size_index = MAX2(0, (_cache_size_index - 1));
 113 
 114     int initial_size = _cc_cache_sizes[_cache_size_index];
 115     if (initial_size < 0) initial_size = _max_n_card_counts;


 116 
 117     // Make sure we don't go bigger than we will ever need
 118     _n_card_counts = MIN2((unsigned) initial_size, _max_n_card_counts);



 119 
 120     _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
 121     _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);







 122 
 123     Copy::fill_to_bytes(&_card_counts[0],
 124                         _n_card_counts * sizeof(CardCountCacheEntry));
 125     Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
 126 
 127     ModRefBarrierSet* bs = _g1h->mr_bs();
 128     guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
 129     _ct_bs = (CardTableModRefBS*)bs;
 130     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 131 
 132     _def_use_cache = true;
 133     _use_cache = true;
 134     _hot_cache_size = (1 << G1ConcRSLogCacheSize);
 135     _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
 136     _n_hot = 0;
 137     _hot_cache_idx = 0;
 138 
 139     // For refining the cards in the hot cache in parallel
 140     int n_workers = (ParallelGCThreads > 0 ?
 141                         _g1h->workers()->total_workers() : 1);


 146 
 147 void ConcurrentG1Refine::stop() {
 148   if (_threads != NULL) {
 149     for (int i = 0; i < _n_threads; i++) {
 150       _threads[i]->stop();
 151     }
 152   }
 153 }
 154 
 155 void ConcurrentG1Refine::reinitialize_threads() {
 156   reset_threshold_step();
 157   if (_threads != NULL) {
 158     for (int i = 0; i < _n_threads; i++) {
 159       _threads[i]->initialize();
 160     }
 161   }
 162 }
 163 
 164 ConcurrentG1Refine::~ConcurrentG1Refine() {
 165   if (G1ConcRSLogCacheSize > 0) {


 166     assert(_card_counts != NULL, "Logic");
 167     FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
 168     assert(_card_epochs != NULL, "Logic");
 169     FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);

 170     assert(_hot_cache != NULL, "Logic");
 171     FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
 172   }
 173   if (_threads != NULL) {
 174     for (int i = 0; i < _n_threads; i++) {
 175       delete _threads[i];
 176     }
 177     FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
 178   }
 179 }
 180 
 181 void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
 182   if (_threads != NULL) {
 183     for (int i = 0; i < _n_threads; i++) {
 184       tc->do_thread(_threads[i]);
 185     }
 186   }
 187 }
 188 
 189 bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {


 365         if (entry != NULL) {
 366           if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
 367             // 'entry' contains references that point into the current
 368             // collection set. We need to record 'entry' in the DCQS
 369             // that's used for that purpose.
 370             //
 371             // The only time we care about recording cards that contain
 372             // references that point into the collection set is during
 373             // RSet updating while within an evacuation pause.
 374             // In this case worker_i should be the id of a GC worker thread
 375             assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
 376             assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
 377             into_cset_dcq->enqueue(entry);
 378           }
 379         }
 380       }
 381     }
 382   }
 383 }
 384 
// Grow the card count and epoch caches to the next size in the
// _cc_cache_sizes table, capped at _max_n_card_counts. The old
// contents are discarded (freed and re-allocated); the caller is
// responsible for re-initializing the new entries.
void ConcurrentG1Refine::expand_card_count_cache() {
  if (_n_card_counts < _max_n_card_counts) {
    int new_idx = _cache_size_index+1;
    // A value of -1 is the end-of-table sentinel; fall back to the
    // absolute maximum in that case.
    int new_size = _cc_cache_sizes[new_idx];
    if (new_size < 0) new_size = _max_n_card_counts;

    // Make sure we don't go bigger than we will ever need
    new_size = MIN2((unsigned) new_size, _max_n_card_counts);

    // Expand the card count and card epoch tables
    if (new_size > (int)_n_card_counts) {
      // We can just free and allocate a new array as we're
      // not interested in preserving the contents
      assert(_card_counts != NULL, "Logic!");
      assert(_card_epochs != NULL, "Logic!");
      FREE_C_HEAP_ARRAY(CardCountCacheEntry, _card_counts);
      FREE_C_HEAP_ARRAY(CardEpochCacheEntry, _card_epochs);
      _n_card_counts = new_size;
      _card_counts = NEW_C_HEAP_ARRAY(CardCountCacheEntry, _n_card_counts);
      _card_epochs = NEW_C_HEAP_ARRAY(CardEpochCacheEntry, _n_card_counts);
      // Remember which table entry got us to this size so the next
      // expansion starts from the following entry.
      _cache_size_index = new_idx;
    }
  }
}
 409 
 410 void ConcurrentG1Refine::clear_and_record_card_counts() {
 411   if (G1ConcRSLogCacheSize == 0) return;
 412 
 413 #ifndef PRODUCT
 414   double start = os::elapsedTime();
 415 #endif
 416 
 417   if (_expand_card_counts) {
 418     expand_card_count_cache();
 419     _expand_card_counts = false;
 420     // Only need to clear the epochs.




 421     Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
 422   }


 423 
 424   int this_epoch = (int) _n_periods;
 425   assert((this_epoch+1) <= max_jint, "to many periods");
 426   // Update epoch
 427   _n_periods++;
 428 
 429 #ifndef PRODUCT
 430   double elapsed = os::elapsedTime() - start;
 431   _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
 432 #endif
 433 }
 434 
 435 void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
 436   for (int i = 0; i < _n_threads; ++i) {
 437     _threads[i]->print_on(st);
 438     st->cr();
 439   }
 440 }


  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/g1/concurrentG1Refine.hpp"
  27 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
  28 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
  30 #include "gc_implementation/g1/g1RemSet.hpp"
  31 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
  32 #include "memory/space.inline.hpp"
  33 #include "runtime/atomic.hpp"
  34 #include "runtime/java.hpp"
  35 #include "utilities/copy.hpp"
  36 
  37 // Possible sizes for the card counts cache: odd primes that roughly double in size.
  38 // (See jvmtiTagMap.cpp).
int ConcurrentG1Refine::_cc_cache_sizes[] = {
  // Odd primes, each roughly double its predecessor; the trailing -1
  // is a sentinel marking the end of the table.
        16381,    32771,    76831,    150001,   307261,
       614563,  1228891,  2457733,   4915219,  9830479,
     19660831, 39321619, 78643219, 157286461,       -1
  };
  44 
  45 ConcurrentG1Refine::ConcurrentG1Refine() :
  46   _card_counts(NULL), _card_epochs(NULL),
  47   _n_card_counts(0), _max_cards(0), _max_n_card_counts(0),
  48   _cache_size_index(0), _expand_card_counts(false),
  49   _hot_cache(NULL),
  50   _def_use_cache(false), _use_cache(false),
  51   _n_periods(0),
  52   _threads(NULL), _n_threads(0)
  53 {
  54 
  55   // Ergomonically select initial concurrent refinement parameters
  56   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
  57     FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
  58   }
  59   set_green_zone(G1ConcRefinementGreenZone);
  60 
  61   if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
  62     FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
  63   }
  64   set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
  65 
  66   if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
  67     FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);


  82     _threads[i] = t;
  83     next = t;
  84   }
  85 }
  86 
// Set the step used to space the refinement workers' activation
// thresholds. By default the green..yellow range is divided evenly
// among the workers; otherwise the user-specified step is used.
void ConcurrentG1Refine::reset_threshold_step() {
  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
    _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
  } else {
    _thread_threshold_step = G1ConcRefinementThresholdStep;
  }
}
  94 
// Number of concurrent refinement threads: G1ConcRefinementThreads if
// set to a positive value, otherwise ParallelGCThreads; never < 1.
int ConcurrentG1Refine::thread_num() {
  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
}
  98 
  99 void ConcurrentG1Refine::init() {
 100   if (G1ConcRSLogCacheSize > 0) {
 101     _g1h = G1CollectedHeap::heap();
 102 
 103     _max_cards = (unsigned)(_g1h->max_capacity() >> CardTableModRefBS::card_shift);
 104     _max_n_card_counts = (unsigned)(_max_cards * ((float)G1MaxHotCardCountSizePercent / 100.0));
 105 
 106     size_t max_card_num = ((size_t)1 << (sizeof(unsigned)*BitsPerByte-1)) - 1;
 107     guarantee(_max_cards < max_card_num, "card_num representation");
 108 
 109     int desired = _max_cards / InitialCacheFraction;





 110 
 111     // Find the index into cache size array that is of a size that's
 112     // large enough to hold "desired".
 113     assert(_n_card_counts == 0, "pre-condition");
 114     assert(_max_n_card_counts > 0, "pre-condition");
 115 
 116     int index;
 117     for (index = 0; _cc_cache_sizes[index] >= 0; index++) {
 118       if (_cc_cache_sizes[index] >= desired) break;
 119     }
 120     assert(index < MAX_CC_CACHE_INDEX, "post condition");
 121 
 122     // _cc_cache_sizes[index - 1] <= desired < _cc_cache_sizes[index]
 123     index = MAX2(0, (index - 1));
 124 
 125     if (!expand_card_count_cache(index)) {
 126       // Allocation was unsuccessful - exit
 127       vm_exit_during_initialization("Could not reserve enough space for card count cache");
 128     }
 129     assert(_n_card_counts > 0, "post-condition");
 130     assert(_cache_size_index == index, "post-condition");
 131 
 132     Copy::fill_to_bytes(&_card_counts[0],
 133                         _n_card_counts * sizeof(CardCountCacheEntry));
 134     Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
 135 
 136     ModRefBarrierSet* bs = _g1h->mr_bs();
 137     guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
 138     _ct_bs = (CardTableModRefBS*)bs;
 139     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 140 
 141     _def_use_cache = true;
 142     _use_cache = true;
 143     _hot_cache_size = (1 << G1ConcRSLogCacheSize);
 144     _hot_cache = NEW_C_HEAP_ARRAY(jbyte*, _hot_cache_size);
 145     _n_hot = 0;
 146     _hot_cache_idx = 0;
 147 
 148     // For refining the cards in the hot cache in parallel
 149     int n_workers = (ParallelGCThreads > 0 ?
 150                         _g1h->workers()->total_workers() : 1);


 155 
// Request termination of all concurrent refinement worker threads.
void ConcurrentG1Refine::stop() {
  if (_threads != NULL) {
    for (int i = 0; i < _n_threads; i++) {
      _threads[i]->stop();
    }
  }
}
 163 
// Recompute the activation threshold step, then re-initialize each
// worker so it picks up the new thresholds.
void ConcurrentG1Refine::reinitialize_threads() {
  reset_threshold_step();
  if (_threads != NULL) {
    for (int i = 0; i < _n_threads; i++) {
      _threads[i]->initialize();
    }
  }
}
 172 
// Release the count/epoch caches, the hot card cache, and the worker
// threads. The caches only exist when G1ConcRSLogCacheSize > 0.
ConcurrentG1Refine::~ConcurrentG1Refine() {
  if (G1ConcRSLogCacheSize > 0) {
    // We access the allocation routines directly for
    // the counts and epochs: these arrays were obtained via
    // os::malloc() in allocate_card_count_cache(), so they must be
    // released with os::free() rather than FREE_C_HEAP_ARRAY.
    assert(_card_counts != NULL, "Logic");
    os::free(_card_counts);
    assert(_card_epochs != NULL, "Logic");
    os::free(_card_epochs);

    // The hot cache, by contrast, was allocated with
    // NEW_C_HEAP_ARRAY, so free it with the matching macro.
    assert(_hot_cache != NULL, "Logic");
    FREE_C_HEAP_ARRAY(jbyte*, _hot_cache);
  }
  if (_threads != NULL) {
    for (int i = 0; i < _n_threads; i++) {
      delete _threads[i];
    }
    FREE_C_HEAP_ARRAY(ConcurrentG1RefineThread*, _threads);
  }
}
 192 
// Apply the given closure to every refinement worker thread.
void ConcurrentG1Refine::threads_do(ThreadClosure *tc) {
  if (_threads != NULL) {
    for (int i = 0; i < _n_threads; i++) {
      tc->do_thread(_threads[i]);
    }
  }
}
 200 
 201 bool ConcurrentG1Refine::is_young_card(jbyte* card_ptr) {


 377         if (entry != NULL) {
 378           if (g1rs->concurrentRefineOneCard(entry, worker_i, true)) {
 379             // 'entry' contains references that point into the current
 380             // collection set. We need to record 'entry' in the DCQS
 381             // that's used for that purpose.
 382             //
 383             // The only time we care about recording cards that contain
 384             // references that point into the collection set is during
 385             // RSet updating while within an evacuation pause.
 386             // In this case worker_i should be the id of a GC worker thread
 387             assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
 388             assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "incorrect worker id");
 389             into_cset_dcq->enqueue(entry);
 390           }
 391         }
 392       }
 393     }
 394   }
 395 }
 396 
 397 // The arrays used to hold the card counts and the epochs must have
 398 // a 1:1 correspondence. Hence they are allocated and freed together
 399 // Returns true if the allocations of both the counts and epochs
 400 // were successful; false otherwise.
 401 bool ConcurrentG1Refine::allocate_card_count_cache(int n,
 402                                                    CardCountCacheEntry** counts,
 403                                                    CardEpochCacheEntry** epochs) {
 404   assert(*counts == NULL, "out param");
 405   assert(*epochs == NULL, "out param");
 406 
 407   size_t counts_size = n * sizeof(CardCountCacheEntry);
 408   size_t epochs_size = n * sizeof(CardEpochCacheEntry);
 409 
 410   *counts = (CardCountCacheEntry*) os::malloc(counts_size);
 411   if (*counts == NULL) {
 412     // allocation was unsuccessful
 413     return false;
 414   }
 415 
 416   *epochs = (CardEpochCacheEntry*) os::malloc(epochs_size);
 417   if (*epochs == NULL) {
 418     // allocation was unsuccessful - free counts array
 419     assert(*counts != NULL, "must be");
 420     os::free(*counts);
 421     *counts = NULL;
 422     return false;
 423   }
 424 
 425   // We successfully allocated both counts and epochs
 426   return true;
 427 }
 428 
// Returns true if the card counts/epochs cache was
// successfully expanded; false otherwise.
bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
  // Can we expand the card count and epoch tables?
  if (_n_card_counts < _max_n_card_counts) {
    // An index past the size table means we have run out of table
    // entries: fall back to the absolute maximum.
    int cache_size = (cache_size_idx < MAX_CC_CACHE_INDEX ? _cc_cache_sizes[cache_size_idx]
                                                          : _max_n_card_counts);
    // A -1 entry is the table's end sentinel.
    if (cache_size < 0) cache_size = _max_n_card_counts;

    // Make sure we don't go bigger than we will ever need
    cache_size = MIN2((unsigned) cache_size, _max_n_card_counts);

    // Should we expand the card count and card epoch tables?
    if (cache_size > (int)_n_card_counts) {
      // We have been asked to allocate new, larger, arrays for
      // the card counts and the epochs. Attempt the allocation
      // of both before we free the existing arrays in case
      // the allocation is unsuccessful...
      CardCountCacheEntry* counts = NULL;
      CardEpochCacheEntry* epochs = NULL;

      if (allocate_card_count_cache(cache_size, &counts, &epochs)) {
        // Allocation was successful.
        // We can just free the old arrays; we're
        // not interested in preserving the contents
        if (_card_counts != NULL) os::free(_card_counts);
        if (_card_epochs != NULL) os::free(_card_epochs);

        // Cache the size of the arrays and the index that got us there.
        _n_card_counts = cache_size;
        _cache_size_index = cache_size_idx;

        _card_counts = counts;
        _card_epochs = epochs;

        // We successfully allocated/expanded the caches.
        return true;
      }
      // Allocation failed: the old arrays are left untouched.
    }
  }

  // We did not successfully expand the caches.
  return false;
}
 473 
// Begin a new counting period. If an expansion of the count cache was
// requested, attempt it here and, on success, reset the epoch entries.
// Finally bump _n_periods, which logically invalidates all cached
// per-card counts. No-op when the hot card cache is disabled.
void ConcurrentG1Refine::clear_and_record_card_counts() {
  if (G1ConcRSLogCacheSize == 0) return;

#ifndef PRODUCT
  double start = os::elapsedTime();
#endif

  if (_expand_card_counts) {
    int new_idx = _cache_size_index + 1;

    if (expand_card_count_cache(new_idx)) {
      // Allocation was successful and _n_card_counts has
      // been updated to the new size. We only need to clear
      // the epochs so we don't read a bogus epoch value
      // when inserting a card into the hot card cache.
      Copy::fill_to_bytes(&_card_epochs[0], _n_card_counts * sizeof(CardEpochCacheEntry));
    }
    // Drop the request whether or not the expansion succeeded; a
    // failed expansion leaves the existing (smaller) caches in use.
    _expand_card_counts = false;
  }

  int this_epoch = (int) _n_periods;
  assert((this_epoch+1) <= max_jint, "to many periods");
  // Update epoch
  _n_periods++;

#ifndef PRODUCT
  double elapsed = os::elapsedTime() - start;
  _g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
#endif
}
 504 
// Print each refinement worker on its own line of the given stream.
void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
  for (int i = 0; i < _n_threads; ++i) {
    _threads[i]->print_on(st);
    st->cr();
  }
}