
src/share/vm/gc/shared/cardTableRS.cpp

  87 }
  88 
  89 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  90   // Parallel or sequential, we must always set the prev to equal the
  91   // last one written.
  92   if (parallel) {
  93     // Find a parallel value to be used next.
  94     jbyte next_val = find_unused_youngergenP_card_value();
  95     set_cur_youngergen_card_val(next_val);
  96 
  97   } else {
  98     // In a sequential traversal we will always write youngergen, so that
  99     // the inline barrier is correct.
 100     set_cur_youngergen_card_val(youngergen_card);
 101   }
 102 }
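
For illustration, a minimal self-contained sketch of the idea behind the parallel branch above: each parallel scan picks a "youngergenP" value distinct from the one used by the previous scan, so that stale markings left by that scan can still be recognized. The enum values and function names below are hypothetical, not HotSpot code.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical card tokens; only their distinctness matters here.
    enum : int8_t { kYoungergen = 1, kYoungergenP1 = 2, kYoungergenP2 = 3, kYoungergenP3 = 4 };

    // Return a "youngergenP" token different from the one used by the last
    // parallel scan, so entries written by that scan remain distinguishable.
    int8_t next_parallel_token(int8_t last) {
      switch (last) {
        case kYoungergenP1: return kYoungergenP2;
        case kYoungergenP2: return kYoungergenP3;
        default:            return kYoungergenP1;
      }
    }

    int main() {
      int8_t cur = kYoungergenP1;
      for (int i = 0; i < 4; i++) {
        cur = next_parallel_token(cur);
        std::printf("scan %d uses token %d\n", i, cur);
      }
      return 0;
    }
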
 103 
 104 void CardTableRS::younger_refs_iterate(Generation* g,
 105                                        OopsInGenClosure* blk,
 106                                        uint n_threads) {
 107   _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();


 108   g->younger_refs_iterate(blk, n_threads);
 109 }
 110 
 111 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
 112   if (_is_par) {
 113     return clear_card_parallel(entry);
 114   } else {
 115     return clear_card_serial(entry);
 116   }
 117 }
 118 
 119 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
 120   while (true) {
 121     // In the parallel case, we may have to do this several times.
 122     jbyte entry_val = *entry;
 123     assert(entry_val != CardTableRS::clean_card_val(),
 124            "We shouldn't be looking at clean cards, and this should "
 125            "be the only place they get cleaned.");
 126     if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
 127         || _ct->is_prev_youngergen_card_val(entry_val)) {
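
The comment above notes that the parallel case "may have to do this several times": several GC workers can race on the same card byte, so each one retries a compare-and-swap until it either performs the transition itself or sees that the card is already clean. A minimal sketch of that retry pattern, using std::atomic and hypothetical names rather than HotSpot's own Atomic class:

    #include <atomic>
    #include <cstdint>

    // Clear one card entry in the presence of racing workers. Returns true if
    // this caller performed the transition to 'clean_val', false if the entry
    // was already clean when observed.
    bool clear_card_racy(std::atomic<int8_t>* entry, int8_t clean_val) {
      int8_t observed = entry->load(std::memory_order_relaxed);
      while (observed != clean_val) {
        // compare_exchange_weak reloads 'observed' on failure, so a lost race
        // simply re-runs the loop with the freshly read value.
        if (entry->compare_exchange_weak(observed, clean_val,
                                         std::memory_order_relaxed)) {
          return true;
        }
      }
      return false;
    }
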


 283     assert(UseConcMarkSweepGC, "Tautology: see assert above");
 284     warning("CMS+ParNew: Did you forget to call save_marks()? "
 285             "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 286             "[" PTR_FORMAT ", " PTR_FORMAT ")",
 287              p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
 288     MemRegion ur2 = sp->used_region();
 289     MemRegion urasm2 = sp->used_region_at_save_marks();
 290     if (!ur.equals(ur2)) {
 291       warning("CMS+ParNew: Flickering used_region()!!");
 292     }
 293     if (!urasm.equals(urasm2)) {
 294       warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
 295     }
 296     ShouldNotReachHere();
 297   }
 298 #endif
 299   _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 300 }
 301 
 302 void CardTableRS::clear_into_younger(Generation* old_gen) {
 303   assert(old_gen->level() == 1, "Should only be called for the old generation");

 304   // The card tables for the youngest gen need never be cleared.
 305   // There's a bit of subtlety in the clear() and invalidate()
 306   // methods that we exploit here and in invalidate_or_clear()
 307   // below to avoid missing cards at the fringes. If clear() or
 308   // invalidate() are changed in the future, this code should
 309   // be revisited. 20040107.ysr
 310   clear(old_gen->prev_used_region());
 311 }
 312 
 313 void CardTableRS::invalidate_or_clear(Generation* old_gen) {
 314   assert(old_gen->level() == 1, "Should only be called for the old generation");

 315   // Invalidate the cards for the currently occupied part of
 316   // the old generation and clear the cards for the
 317   // unoccupied part of the generation (if any, making use
 318   // of that generation's prev_used_region to determine that
 319   // region). No need to do anything for the youngest
 320   // generation. Also see note#20040107.ysr above.
 321   MemRegion used_mr = old_gen->used_region();
 322   MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
 323   if (!to_be_cleared_mr.is_empty()) {
 324     clear(to_be_cleared_mr);
 325   }
 326   invalidate(used_mr);
 327 }
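
As the comment above describes, invalidate_or_clear() dirties the cards for the old generation's currently occupied region and clears the cards for whatever part of the previously used region is no longer occupied. A minimal sketch of that split, using a hypothetical half-open region type rather than HotSpot's MemRegion, and assuming (as for a generation that has shrunk) that the used region is a prefix of the previously used one:

    #include <cstdio>

    // Hypothetical half-open region [start, end) over word indices.
    struct Region {
      long start, end;
      bool empty() const { return start >= end; }
    };

    // The portion of 'prev' lying beyond 'used', assuming both begin at the
    // generation's bottom address.
    Region shrunk_away(Region prev, Region used) {
      long lo = used.end > prev.start ? used.end : prev.start;
      return Region{ lo, prev.end };
    }

    int main() {
      Region prev{0, 100};   // previously used words
      Region used{0, 60};    // currently used words
      Region cleared = shrunk_away(prev, used);
      if (!cleared.empty()) {
        std::printf("clear cards for [%ld, %ld)\n", cleared.start, cleared.end);
      }
      std::printf("invalidate cards for [%ld, %ld)\n", used.start, used.end);
      return 0;
    }
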
 328 
 329 
 330 class VerifyCleanCardClosure: public OopClosure {
 331 private:
 332   HeapWord* _boundary;
 333   HeapWord* _begin;
 334   HeapWord* _end;


 360   virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
 361   virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
 362 };
 363 
 364 class VerifyCTSpaceClosure: public SpaceClosure {
 365 private:
 366   CardTableRS* _ct;
 367   HeapWord* _boundary;
 368 public:
 369   VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
 370     _ct(ct), _boundary(boundary) {}
 371   virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
 372 };
 373 
 374 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
 375   CardTableRS* _ct;
 376 public:
 377   VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
 378   void do_generation(Generation* gen) {
 379     // Skip the youngest generation.
 380     if (gen->level() == 0) return;


 381     // Normally, we're interested in pointers to younger generations.
 382     VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
 383     gen->space_iterate(&blk, true);
 384   }
 385 };
 386 
 387 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
 388   // We don't need to do young-gen spaces.
 389   if (s->end() <= gen_boundary) return;
 390   MemRegion used = s->used_region();
 391 
 392   jbyte* cur_entry = byte_for(used.start());
 393   jbyte* limit = byte_after(used.last());
 394   while (cur_entry < limit) {
 395     if (*cur_entry == CardTableModRefBS::clean_card) {
 396       jbyte* first_dirty = cur_entry+1;
 397       while (first_dirty < limit &&
 398              *first_dirty == CardTableModRefBS::clean_card) {
 399         first_dirty++;
 400       }




  87 }
  88 
  89 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
  90   // Parallel or sequential, we must always set the prev to equal the
  91   // last one written.
  92   if (parallel) {
  93     // Find a parallel value to be used next.
  94     jbyte next_val = find_unused_youngergenP_card_value();
  95     set_cur_youngergen_card_val(next_val);
  96 
  97   } else {
  98     // In a sequential traversal we will always write youngergen, so that
  99     // the inline barrier is correct.
 100     set_cur_youngergen_card_val(youngergen_card);
 101   }
 102 }
 103 
 104 void CardTableRS::younger_refs_iterate(Generation* g,
 105                                        OopsInGenClosure* blk,
 106                                        uint n_threads) {
 107   // The indexing in this array is slightly odd. We want to access
 108   // the old generation record here, which is at index 2.
 109   _last_cur_val_in_gen[2] = cur_youngergen_card_val();
 110   g->younger_refs_iterate(blk, n_threads);
 111 }
 112 
 113 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
 114   if (_is_par) {
 115     return clear_card_parallel(entry);
 116   } else {
 117     return clear_card_serial(entry);
 118   }
 119 }
 120 
 121 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
 122   while (true) {
 123     // In the parallel case, we may have to do this several times.
 124     jbyte entry_val = *entry;
 125     assert(entry_val != CardTableRS::clean_card_val(),
 126            "We shouldn't be looking at clean cards, and this should "
 127            "be the only place they get cleaned.");
 128     if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
 129         || _ct->is_prev_youngergen_card_val(entry_val)) {


 285     assert(UseConcMarkSweepGC, "Tautology: see assert above");
 286     warning("CMS+ParNew: Did you forget to call save_marks()? "
 287             "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 288             "[" PTR_FORMAT ", " PTR_FORMAT ")",
 289              p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
 290     MemRegion ur2 = sp->used_region();
 291     MemRegion urasm2 = sp->used_region_at_save_marks();
 292     if (!ur.equals(ur2)) {
 293       warning("CMS+ParNew: Flickering used_region()!!");
 294     }
 295     if (!urasm.equals(urasm2)) {
 296       warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
 297     }
 298     ShouldNotReachHere();
 299   }
 300 #endif
 301   _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 302 }
 303 
 304 void CardTableRS::clear_into_younger(Generation* old_gen) {
 305   assert(old_gen == GenCollectedHeap::heap()->old_gen(),
 306          "Should only be called for the old generation");
 307   // The card tables for the youngest gen need never be cleared.
 308   // There's a bit of subtlety in the clear() and invalidate()
 309   // methods that we exploit here and in invalidate_or_clear()
 310   // below to avoid missing cards at the fringes. If clear() or
 311   // invalidate() are changed in the future, this code should
 312   // be revisited. 20040107.ysr
 313   clear(old_gen->prev_used_region());
 314 }
 315 
 316 void CardTableRS::invalidate_or_clear(Generation* old_gen) {
 317   assert(old_gen == GenCollectedHeap::heap()->old_gen(),
 318          "Should only be called for the old generation");
 319   // Invalidate the cards for the currently occupied part of
 320   // the old generation and clear the cards for the
 321   // unoccupied part of the generation (if any, making use
 322   // of that generation's prev_used_region to determine that
 323   // region). No need to do anything for the youngest
 324   // generation. Also see note#20040107.ysr above.
 325   MemRegion used_mr = old_gen->used_region();
 326   MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
 327   if (!to_be_cleared_mr.is_empty()) {
 328     clear(to_be_cleared_mr);
 329   }
 330   invalidate(used_mr);
 331 }
 332 
 333 
 334 class VerifyCleanCardClosure: public OopClosure {
 335 private:
 336   HeapWord* _boundary;
 337   HeapWord* _begin;
 338   HeapWord* _end;


 364   virtual void do_oop(oop* p)       { VerifyCleanCardClosure::do_oop_work(p); }
 365   virtual void do_oop(narrowOop* p) { VerifyCleanCardClosure::do_oop_work(p); }
 366 };
 367 
 368 class VerifyCTSpaceClosure: public SpaceClosure {
 369 private:
 370   CardTableRS* _ct;
 371   HeapWord* _boundary;
 372 public:
 373   VerifyCTSpaceClosure(CardTableRS* ct, HeapWord* boundary) :
 374     _ct(ct), _boundary(boundary) {}
 375   virtual void do_space(Space* s) { _ct->verify_space(s, _boundary); }
 376 };
 377 
 378 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
 379   CardTableRS* _ct;
 380 public:
 381   VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
 382   void do_generation(Generation* gen) {
 383     // Skip the youngest generation.
 384     if (gen == GenCollectedHeap::heap()->young_gen()) {
 385       return;
 386     }
 387     // Normally, we're interested in pointers to younger generations.
 388     VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
 389     gen->space_iterate(&blk, true);
 390   }
 391 };
 392 
 393 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
 394   // We don't need to do young-gen spaces.
 395   if (s->end() <= gen_boundary) return;
 396   MemRegion used = s->used_region();
 397 
 398   jbyte* cur_entry = byte_for(used.start());
 399   jbyte* limit = byte_after(used.last());
 400   while (cur_entry < limit) {
 401     if (*cur_entry == CardTableModRefBS::clean_card) {
 402       jbyte* first_dirty = cur_entry+1;
 403       while (first_dirty < limit &&
 404              *first_dirty == CardTableModRefBS::clean_card) {
 405         first_dirty++;
 406       }

