< prev index next >

src/share/vm/gc/shared/cardTableRS.cpp

Print this page
rev 12906 : [mq]: gc_interface


  57   ClassLoaderDataGraph::classes_do(&closure);
  58 
  59   return !closure.found();
  60 }
  61 
  62 
  63 class ClearKlassModUnionClosure : public KlassClosure {
  64  public:
  65   void do_klass(Klass* klass) {
  66     if (klass->has_accumulated_modified_oops()) {
  67       klass->clear_accumulated_modified_oops();
  68     }
  69   }
  70 };
  71 
  72 void KlassRemSet::clear_mod_union() {
  73   ClearKlassModUnionClosure closure;
  74   ClassLoaderDataGraph::classes_do(&closure);
  75 }
  76 
  77 CardTableRS::CardTableRS(MemRegion whole_heap) :
  78   _bs(NULL),
  79   _cur_youngergen_card_val(youngergenP1_card)
  80 {
  81   _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
  82   _ct_bs->initialize();
  83   set_bs(_ct_bs);
  84   // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
  85   // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
  86   uint max_gens = 2;
  87   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
  88                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  89   if (_last_cur_val_in_gen == NULL) {
  90     vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  91   }
  92   for (uint i = 0; i < max_gens + 1; i++) {
  93     _last_cur_val_in_gen[i] = clean_card_val();
  94   }
  95   _ct_bs->set_CTRS(this);
  96 }
  97 
  98 CardTableRS::~CardTableRS() {
  99   if (_ct_bs) {
 100     delete _ct_bs;
 101     _ct_bs = NULL;
 102   }
 103   if (_last_cur_val_in_gen) {
 104     FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
 105   }
 106 }
 107 
// Delegate covered-region resizing to the underlying card-table
// barrier set.
void CardTableRS::resize_covered_region(MemRegion new_region) {
  _ct_bs->resize_covered_region(new_region);
}
 111 
 112 jbyte CardTableRS::find_unused_youngergenP_card_value() {
 113   for (jbyte v = youngergenP1_card;
 114        v < cur_youngergen_and_prev_nonclean_card;
 115        v++) {
 116     bool seen = false;
 117     for (int g = 0; g < _regions_to_iterate; g++) {
 118       if (_last_cur_val_in_gen[g] == v) {
 119         seen = true;
 120         break;
 121       }
 122     }
 123     if (!seen) {
 124       return v;
 125     }
 126   }
 127   ShouldNotReachHere();
 128   return 0;
 129 }
 130 
 131 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {


 229   const jbyte* limit = _ct->byte_for(mr.start());
 230   HeapWord* end_of_non_clean = mr.end();
 231   HeapWord* start_of_non_clean = end_of_non_clean;
 232   while (cur_entry >= limit) {
 233     HeapWord* cur_hw = _ct->addr_for(cur_entry);
 234     if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
 235       // Continue the dirty range by opening the
 236       // dirty window one card to the left.
 237       start_of_non_clean = cur_hw;
 238     } else {
 239       // We hit a "clean" card; process any non-empty
 240       // "dirty" range accumulated so far.
 241       if (start_of_non_clean < end_of_non_clean) {
 242         const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 243         _dirty_card_closure->do_MemRegion(mrd);
 244       }
 245 
 246       // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
 247       if (is_word_aligned(cur_entry)) {
 248         jbyte* cur_row = cur_entry - BytesPerWord;
 249         while (cur_row >= limit && *((intptr_t*)cur_row) ==  CardTableRS::clean_card_row()) {
 250           cur_row -= BytesPerWord;
 251         }
 252         cur_entry = cur_row + BytesPerWord;
 253         cur_hw = _ct->addr_for(cur_entry);
 254       }
 255 
 256       // Reset the dirty window, while continuing to look
 257       // for the next dirty card that will start a
 258       // new dirty window.
 259       end_of_non_clean = cur_hw;
 260       start_of_non_clean = cur_hw;
 261     }
 262     // Note that "cur_entry" leads "start_of_non_clean" in
 263     // its leftward excursion after this point
 264     // in the loop and, when we hit the left end of "mr",
 265     // will point off of the left end of the card-table
 266     // for "mr".
 267     cur_entry--;
 268   }
 269   // If the first card of "mr" was dirty, we will have
 270   // been left with a dirty window, co-initial with "mr",
 271   // which we now process.
 272   if (start_of_non_clean < end_of_non_clean) {
 273     const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 274     _dirty_card_closure->do_MemRegion(mrd);
 275   }
 276 }
 277 
 278 // clean (by dirty->clean before) ==> cur_younger_gen
 279 // dirty                          ==> cur_youngergen_and_prev_nonclean_card
 280 // precleaned                     ==> cur_youngergen_and_prev_nonclean_card
 281 // prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
 282 // cur-younger-gen                ==> cur_younger_gen
 283 // cur_youngergen_and_prev_nonclean_card ==> no change.
 284 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
 285   volatile jbyte* entry = _ct_bs->byte_for(field);
 286   do {
 287     jbyte entry_val = *entry;
 288     // We put this first because it's probably the most common case.
 289     if (entry_val == clean_card_val()) {
 290       // No threat of contention with cleaning threads.
 291       *entry = cur_youngergen_card_val();
 292       return;
 293     } else if (card_is_dirty_wrt_gen_iter(entry_val)
 294                || is_prev_youngergen_card_val(entry_val)) {
 295       // Mark it as both cur and prev youngergen; card cleaning thread will
 296       // eventually remove the previous stuff.
 297       jbyte new_val = cur_youngergen_and_prev_nonclean_card;
 298       jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
 299       // Did the CAS succeed?
 300       if (res == entry_val) return;
 301       // Otherwise, retry, to see the new value.
 302       continue;
 303     } else {
 304       assert(entry_val == cur_youngergen_and_prev_nonclean_card
 305              || entry_val == cur_youngergen_card_val(),


 323          "[" PTR_FORMAT ", " PTR_FORMAT ")",
 324          p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
 325   // In the case of CMS+ParNew, issue a warning
 326   if (!ur.contains(urasm)) {
 327     assert(UseConcMarkSweepGC, "Tautology: see assert above");
 328     log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
 329                     "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 330                     "[" PTR_FORMAT ", " PTR_FORMAT ")",
 331                     p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
 332     MemRegion ur2 = sp->used_region();
 333     MemRegion urasm2 = sp->used_region_at_save_marks();
 334     if (!ur.equals(ur2)) {
 335       log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
 336     }
 337     if (!urasm.equals(urasm2)) {
 338       log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
 339     }
 340     ShouldNotReachHere();
 341   }
 342 #endif
 343   _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 344 }
 345 
// Clear the cards covering the old generation's previously-used region,
// removing stale old->young references after a collection.
void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  clear(old_gen->prev_used_region());
}
 357 
 358 void CardTableRS::invalidate_or_clear(Generation* old_gen) {
 359   assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
 360          "Should only be called for the old generation");
 361   // Invalidate the cards for the currently occupied part of
 362   // the old generation and clear the cards for the
 363   // unoccupied part of the generation (if any, making use


 624       // young collection, its rank can either decline or stay unchanged.
 625       // In this case, no extra work is done in terms of redundant
 626       // younger refs scanning of that card.
 627       // Then, the case analysis above reveals that, in the worst case,
 628       // any such stale card will be scanned unnecessarily at most twice.
 629       //
 630       // It is nonetheless advisable to try and get rid of some of this
 631       // redundant work in a subsequent (low priority) re-design of
 632       // the card-scanning code, if only to simplify the underlying
 633       // state machine analysis/proof. ysr 1/28/2002. XXX
 634       cur_entry++;
 635     }
 636   }
 637 }
 638 
// Verify the remembered set: walk each generation with a verification
// closure, then verify the underlying card-table barrier set itself.
void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  GenCollectedHeap::heap()->generation_iterate(&blk, false);
  _ct_bs->verify();
}


  57   ClassLoaderDataGraph::classes_do(&closure);
  58 
  59   return !closure.found();
  60 }
  61 
  62 
// Closure that clears the "accumulated modified oops" flag on each
// klass it visits; used to reset the klass mod-union state.
class ClearKlassModUnionClosure : public KlassClosure {
 public:
  void do_klass(Klass* klass) {
    // Only touch the flag when it is actually set, avoiding a redundant write.
    if (klass->has_accumulated_modified_oops()) {
      klass->clear_accumulated_modified_oops();
    }
  }
};
  71 
  72 void KlassRemSet::clear_mod_union() {
  73   ClearKlassModUnionClosure closure;
  74   ClassLoaderDataGraph::classes_do(&closure);
  75 }
  76 



































// Return a youngergenP card value not currently in use by any entry of
// _last_cur_val_in_gen. Reaching the end of the candidate range means
// a VM invariant was broken (ShouldNotReachHere).
jbyte CardTableRS::find_unused_youngergenP_card_value() {
  // Scan all candidate values below cur_youngergen_and_prev_nonclean_card.
  for (jbyte v = youngergenP1_card;
       v < cur_youngergen_and_prev_nonclean_card;
       v++) {
    bool seen = false;
    // Is 'v' recorded for any generation/region slot?
    for (int g = 0; g < _regions_to_iterate; g++) {
      if (_last_cur_val_in_gen[g] == v) {
        seen = true;
        break;
      }
    }
    if (!seen) {
      return v;
    }
  }
  ShouldNotReachHere();
  return 0;
}
  95 
  96 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {


 194   const jbyte* limit = _ct->byte_for(mr.start());
 195   HeapWord* end_of_non_clean = mr.end();
 196   HeapWord* start_of_non_clean = end_of_non_clean;
 197   while (cur_entry >= limit) {
 198     HeapWord* cur_hw = _ct->addr_for(cur_entry);
 199     if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
 200       // Continue the dirty range by opening the
 201       // dirty window one card to the left.
 202       start_of_non_clean = cur_hw;
 203     } else {
 204       // We hit a "clean" card; process any non-empty
 205       // "dirty" range accumulated so far.
 206       if (start_of_non_clean < end_of_non_clean) {
 207         const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 208         _dirty_card_closure->do_MemRegion(mrd);
 209       }
 210 
 211       // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
 212       if (is_word_aligned(cur_entry)) {
 213         jbyte* cur_row = cur_entry - BytesPerWord;
 214         while (cur_row >= limit && *((intptr_t*)cur_row) ==  CardTableRS::clean_card_row_val()) {
 215           cur_row -= BytesPerWord;
 216         }
 217         cur_entry = cur_row + BytesPerWord;
 218         cur_hw = _ct->addr_for(cur_entry);
 219       }
 220 
 221       // Reset the dirty window, while continuing to look
 222       // for the next dirty card that will start a
 223       // new dirty window.
 224       end_of_non_clean = cur_hw;
 225       start_of_non_clean = cur_hw;
 226     }
 227     // Note that "cur_entry" leads "start_of_non_clean" in
 228     // its leftward excursion after this point
 229     // in the loop and, when we hit the left end of "mr",
 230     // will point off of the left end of the card-table
 231     // for "mr".
 232     cur_entry--;
 233   }
 234   // If the first card of "mr" was dirty, we will have
 235   // been left with a dirty window, co-initial with "mr",
 236   // which we now process.
 237   if (start_of_non_clean < end_of_non_clean) {
 238     const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 239     _dirty_card_closure->do_MemRegion(mrd);
 240   }
 241 }
 242 
 243 // clean (by dirty->clean before) ==> cur_younger_gen
 244 // dirty                          ==> cur_youngergen_and_prev_nonclean_card
 245 // precleaned                     ==> cur_youngergen_and_prev_nonclean_card
 246 // prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
 247 // cur-younger-gen                ==> cur_younger_gen
 248 // cur_youngergen_and_prev_nonclean_card ==> no change.
 249 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
 250   volatile jbyte* entry = byte_for(field);
 251   do {
 252     jbyte entry_val = *entry;
 253     // We put this first because it's probably the most common case.
 254     if (entry_val == clean_card_val()) {
 255       // No threat of contention with cleaning threads.
 256       *entry = cur_youngergen_card_val();
 257       return;
 258     } else if (card_is_dirty_wrt_gen_iter(entry_val)
 259                || is_prev_youngergen_card_val(entry_val)) {
 260       // Mark it as both cur and prev youngergen; card cleaning thread will
 261       // eventually remove the previous stuff.
 262       jbyte new_val = cur_youngergen_and_prev_nonclean_card;
 263       jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
 264       // Did the CAS succeed?
 265       if (res == entry_val) return;
 266       // Otherwise, retry, to see the new value.
 267       continue;
 268     } else {
 269       assert(entry_val == cur_youngergen_and_prev_nonclean_card
 270              || entry_val == cur_youngergen_card_val(),


 288          "[" PTR_FORMAT ", " PTR_FORMAT ")",
 289          p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
 290   // In the case of CMS+ParNew, issue a warning
 291   if (!ur.contains(urasm)) {
 292     assert(UseConcMarkSweepGC, "Tautology: see assert above");
 293     log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
 294                     "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 295                     "[" PTR_FORMAT ", " PTR_FORMAT ")",
 296                     p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
 297     MemRegion ur2 = sp->used_region();
 298     MemRegion urasm2 = sp->used_region_at_save_marks();
 299     if (!ur.equals(ur2)) {
 300       log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
 301     }
 302     if (!urasm.equals(urasm2)) {
 303       log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
 304     }
 305     ShouldNotReachHere();
 306   }
 307 #endif
 308   non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 309 }
 310 
// Clear the cards for the old generation's previously-used region so
// stale old->young references are dropped after a collection.
void CardTableRS::clear_into_younger(Generation* old_gen) {
  assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
         "Should only be called for the old generation");
  // The card tables for the youngest gen need never be cleared.
  // There's a bit of subtlety in the clear() and invalidate()
  // methods that we exploit here and in invalidate_or_clear()
  // below to avoid missing cards at the fringes. If clear() or
  // invalidate() are changed in the future, this code should
  // be revisited. 20040107.ysr
  clear(old_gen->prev_used_region());
}
 322 
 323 void CardTableRS::invalidate_or_clear(Generation* old_gen) {
 324   assert(GenCollectedHeap::heap()->is_old_gen(old_gen),
 325          "Should only be called for the old generation");
 326   // Invalidate the cards for the currently occupied part of
 327   // the old generation and clear the cards for the
 328   // unoccupied part of the generation (if any, making use


 589       // young collection, its rank can either decline or stay unchanged.
 590       // In this case, no extra work is done in terms of redundant
 591       // younger refs scanning of that card.
 592       // Then, the case analysis above reveals that, in the worst case,
 593       // any such stale card will be scanned unnecessarily at most twice.
 594       //
 595       // It is nonetheless advisable to try and get rid of some of this
 596       // redundant work in a subsequent (low priority) re-design of
 597       // the card-scanning code, if only to simplify the underlying
 598       // state machine analysis/proof. ysr 1/28/2002. XXX
 599       cur_entry++;
 600     }
 601   }
 602 }
 603 
// Verify the remembered set: iterate the generations with a
// verification closure, then verify the base card table.
void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  GenCollectedHeap::heap()->generation_iterate(&blk, false);
  CardTable::verify();
}
 611 
// Construct the card-table remembered set over 'whole_heap'. The base
// CardTable is told the table may be scanned concurrently when CMS is
// in use. The LNC ("lowest non-clean") arrays start NULL and are
// allocated later in initialize().
CardTableRS::CardTableRS(MemRegion whole_heap) :
  CardTable(whole_heap, /* scanned concurrently */ UseConcMarkSweepGC),
  _cur_youngergen_card_val(youngergenP1_card),
  // LNC functionality
  _lowest_non_clean(NULL),
  _lowest_non_clean_chunk_size(NULL),
  _lowest_non_clean_base_chunk_index(NULL),
  _last_LNC_resizing_collection(NULL)
{
  // max_gens is really GenCollectedHeap::heap()->gen_policy()->number_of_generations()
  // (which is always 2, young & old), but GenCollectedHeap has not been initialized yet.
  uint max_gens = 2;
  _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, max_gens + 1,
                         mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);
  if (_last_cur_val_in_gen == NULL) {
    vm_exit_during_initialization("Could not create last_cur_val_in_gen array.");
  }
  // Every generation's slot starts at the clean-card value.
  for (uint i = 0; i < max_gens + 1; i++) {
    _last_cur_val_in_gen[i] = clean_card_val();
  }
}
 633 
 634 CardTableRS::~CardTableRS() {
 635   if (_last_cur_val_in_gen) {
 636     FREE_C_HEAP_ARRAY(jbyte, _last_cur_val_in_gen);
 637   }
 638   if (_lowest_non_clean) {
 639     FREE_C_HEAP_ARRAY(CardArr, _lowest_non_clean);
 640     _lowest_non_clean = NULL;
 641   }
 642   if (_lowest_non_clean_chunk_size) {
 643     FREE_C_HEAP_ARRAY(size_t, _lowest_non_clean_chunk_size);
 644     _lowest_non_clean_chunk_size = NULL;
 645   }
 646   if (_lowest_non_clean_base_chunk_index) {
 647     FREE_C_HEAP_ARRAY(uintptr_t, _lowest_non_clean_base_chunk_index);
 648     _lowest_non_clean_base_chunk_index = NULL;
 649   }
 650   if (_last_LNC_resizing_collection) {
 651     FREE_C_HEAP_ARRAY(int, _last_LNC_resizing_collection);
 652     _last_LNC_resizing_collection = NULL;
 653   }
 654 }
 655 
 656 void CardTableRS::initialize() {
 657   CardTable::initialize();
 658   _lowest_non_clean =
 659     NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
 660   _lowest_non_clean_chunk_size =
 661     NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
 662   _lowest_non_clean_base_chunk_index =
 663     NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
 664   _last_LNC_resizing_collection =
 665     NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
 666   if (_lowest_non_clean == NULL
 667       || _lowest_non_clean_chunk_size == NULL
 668       || _lowest_non_clean_base_chunk_index == NULL
 669       || _last_LNC_resizing_collection == NULL)
 670     vm_exit_during_initialization("couldn't allocate an LNC array.");
 671   for (int i = 0; i < _max_covered_regions; i++) {
 672     _lowest_non_clean[i] = NULL;
 673     _lowest_non_clean_chunk_size[i] = 0;
 674     _last_LNC_resizing_collection[i] = -1;
 675   }
 676 }
 677 
// A card will be scanned if it is dirty w.r.t. the generation
// iteration or holds a previous non-clean value.
bool CardTableRS::card_will_be_scanned(jbyte cv) {
  return card_is_dirty_wrt_gen_iter(cv) || is_prev_nonclean_card_val(cv);
}
 681 
 682 bool CardTableRS::card_may_have_been_dirty(jbyte cv) {
 683   return
 684     cv != clean_card &&
 685     (card_is_dirty_wrt_gen_iter(cv) ||
 686      CardTableRS::youngergen_may_have_been_dirty(cv));
 687 }
 688 
 689 void CardTableRS::non_clean_card_iterate_possibly_parallel(
 690   Space* sp,
 691   MemRegion mr,
 692   OopsInGenClosure* cl,
 693   CardTableRS* ct,
 694   uint n_threads)
 695 {
 696   if (!mr.is_empty()) {
 697     if (n_threads > 0) {
 698 #if INCLUDE_ALL_GCS
 699       non_clean_card_iterate_parallel_work(sp, mr, cl, ct, n_threads);
 700 #else  // INCLUDE_ALL_GCS
 701       fatal("Parallel gc not supported here.");
 702 #endif // INCLUDE_ALL_GCS
 703     } else {
 704       // clear_cl finds contiguous dirty ranges of cards to process and clear.
 705 
 706       // This is the single-threaded version used by DefNew.
 707       const bool parallel = false;
 708 
 709       DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), cl->gen_boundary(), parallel);
 710       ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
 711 
 712       clear_cl.do_MemRegion(mr);
 713     }
 714   }
 715 }
 716 
// Returns true iff 'addr' lies in the young generation of the
// generational heap.
bool CardTableRS::is_in_young(void* addr) const {
  return GenCollectedHeap::heap()->is_in_young((oop)addr);
}
< prev index next >