
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7249 : 8061748: Remove check_ct_logs_at_safepoint()
Summary: Remove unused function and related closure class
Reviewed-by:
Contributed-by: kim.barrett@oracle.com


 110 
 111   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 112     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 113     // This path is executed by the concurrent refine or mutator threads,
 114     // concurrently, and so we do not care if card_ptr contains references
 115     // that point into the collection set.
 116     assert(!oops_into_cset, "should be");
 117 
 118     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 119       // Caller will actually yield.
 120       return false;
 121     }
 122     // Otherwise, we finished successfully; return true.
 123     return true;
 124   }
 125 
 126   void set_concurrent(bool b) { _concurrent = b; }
 127 };
 128 
 129 
 130 class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
 131   size_t _num_processed;
 132   CardTableModRefBS* _ctbs;
 133   int _histo[256];
 134 
 135  public:
 136   ClearLoggedCardTableEntryClosure() :
 137     _num_processed(0), _ctbs(G1CollectedHeap::heap()->g1_barrier_set())
 138   {
 139     for (int i = 0; i < 256; i++) _histo[i] = 0;
 140   }
 141 
 142   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 143     unsigned char* ujb = (unsigned char*)card_ptr;
 144     int ind = (int)(*ujb);
 145     _histo[ind]++;
 146 
 147     *card_ptr = (jbyte)CardTableModRefBS::clean_card_val();
 148     _num_processed++;
 149 
 150     return true;
 151   }
 152 
 153   size_t num_processed() { return _num_processed; }
 154 
 155   void print_histo() {
 156     gclog_or_tty->print_cr("Card table value histogram:");
 157     for (int i = 0; i < 256; i++) {
 158       if (_histo[i] != 0) {
 159         gclog_or_tty->print_cr("  %d: %d", i, _histo[i]);
 160       }
 161     }
 162   }
 163 };
 164 
 165 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 166  private:
 167   size_t _num_processed;
 168 
 169  public:
 170   RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
 171 
 172   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 173     *card_ptr = CardTableModRefBS::dirty_card_val();
 174     _num_processed++;
 175     return true;
 176   }
 177 
 178   size_t num_processed() const { return _num_processed; }
 179 };
 180 
 181 YoungList::YoungList(G1CollectedHeap* g1h) :
 182     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
 183     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
 184   guarantee(check_list_empty(false), "just making sure...");


 456 // therefore is only accurate during a GC pause after all
 457 // regions have been retired.  It is used for debugging
 458 // to check if an nmethod has references to objects that can
 459 // be moved during a partial collection.  Though it can be
 460 // inaccurate, it is sufficient for G1 because the conservative
 461 // implementation of is_scavengable() for G1 will indicate that
 462 // all nmethods must be scanned during a partial collection.
 463 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 464   if (p == NULL) {
 465     return false;
 466   }
 467   return heap_region_containing(p)->in_collection_set();
 468 }
 469 #endif
 470 
 471 // Returns true if the reference points to an object that
 472 // can move in an incremental collection.
 473 bool G1CollectedHeap::is_scavengable(const void* p) {
 474   HeapRegion* hr = heap_region_containing(p);
 475   return !hr->is_humongous();
 476 }
 477 
 478 void G1CollectedHeap::check_ct_logs_at_safepoint() {
 479   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
 480   CardTableModRefBS* ct_bs = g1_barrier_set();
 481 
 482   // Count the dirty cards at the start.
 483   CountNonCleanMemRegionClosure count1(this);
 484   ct_bs->mod_card_iterate(&count1);
 485   int orig_count = count1.n();
 486 
 487   // First clear the logged cards.
 488   ClearLoggedCardTableEntryClosure clear;
 489   dcqs.apply_closure_to_all_completed_buffers(&clear);
 490   dcqs.iterate_closure_all_threads(&clear, false);
 491   clear.print_histo();
 492 
 493   // Now ensure that there are no dirty cards.
 494   CountNonCleanMemRegionClosure count2(this);
 495   ct_bs->mod_card_iterate(&count2);
 496   if (count2.n() != 0) {
 497     gclog_or_tty->print_cr("Card table has %d entries; %d originally",
 498                            count2.n(), orig_count);
 499   }
 500   guarantee(count2.n() == 0, "Card table should be clean.");
 501 
 502   RedirtyLoggedCardTableEntryClosure redirty;
 503   dcqs.apply_closure_to_all_completed_buffers(&redirty);
 504   dcqs.iterate_closure_all_threads(&redirty, false);
 505   gclog_or_tty->print_cr("Log entries = "SIZE_FORMAT", dirty cards = %d.",
 506                          clear.num_processed(), orig_count);
 507   guarantee(redirty.num_processed() == clear.num_processed(),
 508             err_msg("Redirtied "SIZE_FORMAT" cards, but cleared "SIZE_FORMAT,
 509                     redirty.num_processed(), clear.num_processed()));
 510 
 511   CountNonCleanMemRegionClosure count3(this);
 512   ct_bs->mod_card_iterate(&count3);
 513   if (count3.n() != orig_count) {
 514     gclog_or_tty->print_cr("Should have restored them all: orig = %d, final = %d.",
 515                            orig_count, count3.n());
 516     guarantee(count3.n() >= orig_count, "Should have restored them all.");
 517   }
 518 }
 519 
 520 // Private class members.
 521 
 522 G1CollectedHeap* G1CollectedHeap::_g1h;
 523 
 524 // Private methods.
 525 
 526 HeapRegion*
 527 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 528   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 529   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 530     if (!_secondary_free_list.is_empty()) {
 531       if (G1ConcRegionFreeingVerbose) {
 532         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 533                                "secondary_free_list has %u entries",
 534                                _secondary_free_list.length());
 535       }
 536       // It looks as if there are free regions available on the
 537       // secondary_free_list. Let's move them to the free_list and try




 110 
 111   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 112     bool oops_into_cset = G1CollectedHeap::heap()->g1_rem_set()->refine_card(card_ptr, worker_i, false);
 113     // This path is executed by the concurrent refine or mutator threads,
 114     // concurrently, and so we do not care if card_ptr contains references
 115     // that point into the collection set.
 116     assert(!oops_into_cset, "should be");
 117 
 118     if (_concurrent && SuspendibleThreadSet::should_yield()) {
 119       // Caller will actually yield.
 120       return false;
 121     }
 122     // Otherwise, we finished successfully; return true.
 123     return true;
 124   }
 125 
 126   void set_concurrent(bool b) { _concurrent = b; }
 127 };
 128 
 129 
 130 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 131  private:
 132   size_t _num_processed;
 133 
 134  public:
 135   RedirtyLoggedCardTableEntryClosure() : CardTableEntryClosure(), _num_processed(0) { }
 136 
 137   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
 138     *card_ptr = CardTableModRefBS::dirty_card_val();
 139     _num_processed++;
 140     return true;
 141   }
 142 
 143   size_t num_processed() const { return _num_processed; }
 144 };
 145 
 146 YoungList::YoungList(G1CollectedHeap* g1h) :
 147     _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
 148     _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
 149   guarantee(check_list_empty(false), "just making sure...");


 421 // therefore is only accurate during a GC pause after all
 422 // regions have been retired.  It is used for debugging
 423 // to check if an nmethod has references to objects that can
 424 // be moved during a partial collection.  Though it can be
 425 // inaccurate, it is sufficient for G1 because the conservative
 426 // implementation of is_scavengable() for G1 will indicate that
 427 // all nmethods must be scanned during a partial collection.
 428 bool G1CollectedHeap::is_in_partial_collection(const void* p) {
 429   if (p == NULL) {
 430     return false;
 431   }
 432   return heap_region_containing(p)->in_collection_set();
 433 }
 434 #endif
 435 
 436 // Returns true if the reference points to an object that
 437 // can move in an incremental collection.
 438 bool G1CollectedHeap::is_scavengable(const void* p) {
 439   HeapRegion* hr = heap_region_containing(p);
 440   return !hr->is_humongous();
 441 }
 442 
 443 // Private class members.
 444 
 445 G1CollectedHeap* G1CollectedHeap::_g1h;
 446 
 447 // Private methods.
 448 
 449 HeapRegion*
 450 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
 451   MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
 452   while (!_secondary_free_list.is_empty() || free_regions_coming()) {
 453     if (!_secondary_free_list.is_empty()) {
 454       if (G1ConcRegionFreeingVerbose) {
 455         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
 456                                "secondary_free_list has %u entries",
 457                                _secondary_free_list.length());
 458       }
 459       // It looks as if there are free regions available on the
 460       // secondary_free_list. Let's move them to the free_list and try

