< prev index next >

src/share/vm/gc/shared/cardTableRS.cpp

Print this page




  85   ShouldNotReachHere();
  86   return 0;
  87 }
  88 
  89 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
       // Choose the "current younger-gen" card value for the upcoming
       // younger-refs scan: a fresh, previously unused value per parallel
       // round, or the fixed youngergen_card value in the serial case.
  90   // Parallel or sequential, we must always set the prev to equal the
  91   // last one written.
  92   if (parallel) {
  93     // Find a parallel value to be used next.
  94     jbyte next_val = find_unused_youngergenP_card_value();
  95     set_cur_youngergen_card_val(next_val);
  96 
  97   } else {
  98     // In a sequential traversal we will always write youngergen, so that
  99     // the inline barrier is correct.
 100     set_cur_youngergen_card_val(youngergen_card);
 101   }
 102 }
 103 
 104 void CardTableRS::younger_refs_iterate(Generation* g,
 105                                        OopsInGenClosure* blk) {

       // Remember the card value in use for generation g's level, then
       // delegate the actual iteration to the generation itself.
 106   _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
 107   g->younger_refs_iterate(blk);
 108 }
 109 
 110 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
       // Dispatch on _is_par: the parallel path must tolerate concurrent
       // updates to the entry, while the serial path can store the clean
       // value directly.  Returns whether the caller should process the card.
 111   if (_is_par) {
 112     return clear_card_parallel(entry);
 113   } else {
 114     return clear_card_serial(entry);
 115   }
 116 }
 117 
 118 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
 119   while (true) {
 120     // In the parallel case, we may have to do this several times.
 121     jbyte entry_val = *entry;
 122     assert(entry_val != CardTableRS::clean_card_val(),
 123            "We shouldn't be looking at clean cards, and this should "
 124            "be the only place they get cleaned.");
 125     if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
 126         || _ct->is_prev_youngergen_card_val(entry_val)) {
 127       jbyte res =


 147       // We don't have to look at the card.
 148       return false;
 149     }
 150   }
 151   return true;
 152 }
 153 
 154 
 155 inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
       // Serial card clearing: no other thread can race on the entry, so a
       // plain store of clean_card_val() suffices.  Always returns true,
       // i.e. the caller should process the card.
 156   jbyte entry_val = *entry;
 157   assert(entry_val != CardTableRS::clean_card_val(),
 158          "We shouldn't be looking at clean cards, and this should "
 159          "be the only place they get cleaned.");
 160   assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
 161          "This should be possible in the sequential case.");
       // NOTE(review): the assert message above reads backwards -- the
       // asserted condition means this value should NOT be possible in the
       // sequential case; the string itself is deliberately left unchanged.
 162   *entry = CardTableRS::clean_card_val();
 163   return true;
 164 }
 165 
 166 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
 167   DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct) :
 168     _dirty_card_closure(dirty_card_closure), _ct(ct) {
 169     // Cannot yet substitute active_workers for n_par_threads
 170     // in the case where parallelism is being turned off by
 171     // setting n_par_threads to 0.
       // Parallel mode is inferred from the heap-global n_par_threads()
       // rather than passed in; the assert checks it agrees with the
       // active worker count whenever parallelism is enabled.
 172     _is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
 173     assert(!_is_par ||
 174            (GenCollectedHeap::heap()->n_par_threads() ==
 175             GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
 176 }
 177 
 178 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
       // True iff the card-table entry address is word aligned.
 179   return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
 180 }
 181 
 182 // The regions are visited in *decreasing* address order.
 183 // This order aids with imprecise card marking, where a dirty
 184 // card may cause scanning, and summarization marking, of objects
 185 // that extend onto subsequent cards.
 186 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
 187   assert(mr.word_size() > 0, "Error");
 188   assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
 189   // mr.end() may not necessarily be card aligned.
 190   jbyte* cur_entry = _ct->byte_for(mr.last());
 191   const jbyte* limit = _ct->byte_for(mr.start());
 192   HeapWord* end_of_non_clean = mr.end();
 193   HeapWord* start_of_non_clean = end_of_non_clean;
 194   while (cur_entry >= limit) {
 195     HeapWord* cur_hw = _ct->addr_for(cur_entry);


 255     } else if (card_is_dirty_wrt_gen_iter(entry_val)
 256                || is_prev_youngergen_card_val(entry_val)) {
 257       // Mark it as both cur and prev youngergen; card cleaning thread will
 258       // eventually remove the previous stuff.
 259       jbyte new_val = cur_youngergen_and_prev_nonclean_card;
 260       jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
 261       // Did the CAS succeed?
 262       if (res == entry_val) return;
 263       // Otherwise, retry, to see the new value.
 264       continue;
 265     } else {
 266       assert(entry_val == cur_youngergen_and_prev_nonclean_card
 267              || entry_val == cur_youngergen_card_val(),
 268              "should be only possibilities.");
 269       return;
 270     }
 271   } while (true);
 272 }
 273 
 274 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
 275                                                 OopsInGenClosure* cl) {

       // Apply cl over the non-clean cards of sp's used-at-save-marks
       // region.  The ASSERT-only block below merely diagnoses a suspected
       // CMS+ParNew bug where used_region_at_save_marks() escapes
       // used_region(); the real work is the final iterate call.
 276   const MemRegion urasm = sp->used_region_at_save_marks();
 277 #ifdef ASSERT
 278   // Convert the assertion check to a warning if we are running
 279   // CMS+ParNew until related bug is fixed.
 280   MemRegion ur    = sp->used_region();
 281   assert(ur.contains(urasm) || (UseConcMarkSweepGC),
 282          err_msg("Did you forget to call save_marks()? "
 283                  "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 284                  "[" PTR_FORMAT ", " PTR_FORMAT ")",
 285                  p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
 286   // In the case of CMS+ParNew, issue a warning
 287   if (!ur.contains(urasm)) {
 288     assert(UseConcMarkSweepGC, "Tautology: see assert above");
 289     warning("CMS+ParNew: Did you forget to call save_marks()? "
 290             "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 291             "[" PTR_FORMAT ", " PTR_FORMAT ")",
 292              p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
       // Re-read both regions to detect concurrent mutation ("flickering").
 293     MemRegion ur2 = sp->used_region();
 294     MemRegion urasm2 = sp->used_region_at_save_marks();
 295     if (!ur.equals(ur2)) {
 296       warning("CMS+ParNew: Flickering used_region()!!");
 297     }
 298     if (!urasm.equals(urasm2)) {
 299       warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
 300     }
 301     ShouldNotReachHere();
 302   }
 303 #endif
 304   _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
 305 }
 306 
 307 void CardTableRS::clear_into_younger(Generation* old_gen) {
       // Clear the card marks covering old_gen's previously used region;
       // see the subtlety note below about clear()/invalidate() fringes.
 308   assert(old_gen->level() == 1, "Should only be called for the old generation");
 309   // The card tables for the youngest gen need never be cleared.
 310   // There's a bit of subtlety in the clear() and invalidate()
 311   // methods that we exploit here and in invalidate_or_clear()
 312   // below to avoid missing cards at the fringes. If clear() or
 313   // invalidate() are changed in the future, this code should
 314   // be revisited. 20040107.ysr
 315   clear(old_gen->prev_used_region());
 316 }
 317 
 318 void CardTableRS::invalidate_or_clear(Generation* old_gen) {
 319   assert(old_gen->level() == 1, "Should only be called for the old generation");
 320   // Invalidate the cards for the currently occupied part of
 321   // the old generation and clear the cards for the
 322   // unoccupied part of the generation (if any, making use
 323   // of that generation's prev_used_region to determine that
 324   // region). No need to do anything for the youngest




  85   ShouldNotReachHere();
  86   return 0;
  87 }
  88 
  89 void CardTableRS::prepare_for_younger_refs_iterate(bool parallel) {
       // Choose the "current younger-gen" card value for the upcoming
       // younger-refs scan: a fresh, previously unused value per parallel
       // round, or the fixed youngergen_card value in the serial case.
  90   // Parallel or sequential, we must always set the prev to equal the
  91   // last one written.
  92   if (parallel) {
  93     // Find a parallel value to be used next.
  94     jbyte next_val = find_unused_youngergenP_card_value();
  95     set_cur_youngergen_card_val(next_val);
  96 
  97   } else {
  98     // In a sequential traversal we will always write youngergen, so that
  99     // the inline barrier is correct.
 100     set_cur_youngergen_card_val(youngergen_card);
 101   }
 102 }
 103 
 104 void CardTableRS::younger_refs_iterate(Generation* g,
 105                                        OopsInGenClosure* blk,
 106                                        uint n_threads) {
       // n_threads is now passed through explicitly (rather than read from
       // heap-global state as in the old version above).  Remember the card
       // value in use for g's level, then delegate to the generation.
 107   _last_cur_val_in_gen[g->level()+1] = cur_youngergen_card_val();
 108   g->younger_refs_iterate(blk, n_threads);
 109 }
 110 
 111 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
       // Dispatch on _is_par: the parallel path must tolerate concurrent
       // updates to the entry, while the serial path can store the clean
       // value directly.  Returns whether the caller should process the card.
 112   if (_is_par) {
 113     return clear_card_parallel(entry);
 114   } else {
 115     return clear_card_serial(entry);
 116   }
 117 }
 118 
 119 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
 120   while (true) {
 121     // In the parallel case, we may have to do this several times.
 122     jbyte entry_val = *entry;
 123     assert(entry_val != CardTableRS::clean_card_val(),
 124            "We shouldn't be looking at clean cards, and this should "
 125            "be the only place they get cleaned.");
 126     if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
 127         || _ct->is_prev_youngergen_card_val(entry_val)) {
 128       jbyte res =


 148       // We don't have to look at the card.
 149       return false;
 150     }
 151   }
 152   return true;
 153 }
 154 
 155 
 156 inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
       // Serial card clearing: no other thread can race on the entry, so a
       // plain store of clean_card_val() suffices.  Always returns true,
       // i.e. the caller should process the card.
 157   jbyte entry_val = *entry;
 158   assert(entry_val != CardTableRS::clean_card_val(),
 159          "We shouldn't be looking at clean cards, and this should "
 160          "be the only place they get cleaned.");
 161   assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
 162          "This should be possible in the sequential case.");
       // NOTE(review): the assert message above reads backwards -- the
       // asserted condition means this value should NOT be possible in the
       // sequential case; the string itself is deliberately left unchanged.
 163   *entry = CardTableRS::clean_card_val();
 164   return true;
 165 }
 166 
 167 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
 168   DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
 169     _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
       // Parallelism is now an explicit constructor argument; the old
       // version's dependence on GenCollectedHeap::heap()->n_par_threads()
       // (and its consistency assert) is gone.







 170 }
 171 
 172 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
       // True iff the card-table entry address is word aligned.
 173   return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
 174 }
 175 
 176 // The regions are visited in *decreasing* address order.
 177 // This order aids with imprecise card marking, where a dirty
 178 // card may cause scanning, and summarization marking, of objects
 179 // that extend onto subsequent cards.
 180 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
 181   assert(mr.word_size() > 0, "Error");
 182   assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
 183   // mr.end() may not necessarily be card aligned.
 184   jbyte* cur_entry = _ct->byte_for(mr.last());
 185   const jbyte* limit = _ct->byte_for(mr.start());
 186   HeapWord* end_of_non_clean = mr.end();
 187   HeapWord* start_of_non_clean = end_of_non_clean;
 188   while (cur_entry >= limit) {
 189     HeapWord* cur_hw = _ct->addr_for(cur_entry);


 249     } else if (card_is_dirty_wrt_gen_iter(entry_val)
 250                || is_prev_youngergen_card_val(entry_val)) {
 251       // Mark it as both cur and prev youngergen; card cleaning thread will
 252       // eventually remove the previous stuff.
 253       jbyte new_val = cur_youngergen_and_prev_nonclean_card;
 254       jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
 255       // Did the CAS succeed?
 256       if (res == entry_val) return;
 257       // Otherwise, retry, to see the new value.
 258       continue;
 259     } else {
 260       assert(entry_val == cur_youngergen_and_prev_nonclean_card
 261              || entry_val == cur_youngergen_card_val(),
 262              "should be only possibilities.");
 263       return;
 264     }
 265   } while (true);
 266 }
 267 
 268 void CardTableRS::younger_refs_in_space_iterate(Space* sp,
 269                                                 OopsInGenClosure* cl,
 270                                                 uint n_threads) {
       // Apply cl over the non-clean cards of sp's used-at-save-marks
       // region, with n_threads now forwarded explicitly.  The ASSERT-only
       // block below merely diagnoses a suspected CMS+ParNew bug where
       // used_region_at_save_marks() escapes used_region(); the real work
       // is the final iterate call.
 271   const MemRegion urasm = sp->used_region_at_save_marks();
 272 #ifdef ASSERT
 273   // Convert the assertion check to a warning if we are running
 274   // CMS+ParNew until related bug is fixed.
 275   MemRegion ur    = sp->used_region();
 276   assert(ur.contains(urasm) || (UseConcMarkSweepGC),
 277          err_msg("Did you forget to call save_marks()? "
 278                  "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 279                  "[" PTR_FORMAT ", " PTR_FORMAT ")",
 280                  p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())));
 281   // In the case of CMS+ParNew, issue a warning
 282   if (!ur.contains(urasm)) {
 283     assert(UseConcMarkSweepGC, "Tautology: see assert above");
 284     warning("CMS+ParNew: Did you forget to call save_marks()? "
 285             "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
 286             "[" PTR_FORMAT ", " PTR_FORMAT ")",
 287              p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
       // Re-read both regions to detect concurrent mutation ("flickering").
 288     MemRegion ur2 = sp->used_region();
 289     MemRegion urasm2 = sp->used_region_at_save_marks();
 290     if (!ur.equals(ur2)) {
 291       warning("CMS+ParNew: Flickering used_region()!!");
 292     }
 293     if (!urasm.equals(urasm2)) {
 294       warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
 295     }
 296     ShouldNotReachHere();
 297   }
 298 #endif
 299   _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this, n_threads);
 300 }
 301 
 302 void CardTableRS::clear_into_younger(Generation* old_gen) {
       // Clear the card marks covering old_gen's previously used region;
       // see the subtlety note below about clear()/invalidate() fringes.
 303   assert(old_gen->level() == 1, "Should only be called for the old generation");
 304   // The card tables for the youngest gen need never be cleared.
 305   // There's a bit of subtlety in the clear() and invalidate()
 306   // methods that we exploit here and in invalidate_or_clear()
 307   // below to avoid missing cards at the fringes. If clear() or
 308   // invalidate() are changed in the future, this code should
 309   // be revisited. 20040107.ysr
 310   clear(old_gen->prev_used_region());
 311 }
 312 
 313 void CardTableRS::invalidate_or_clear(Generation* old_gen) {
 314   assert(old_gen->level() == 1, "Should only be called for the old generation");
 315   // Invalidate the cards for the currently occupied part of
 316   // the old generation and clear the cards for the
 317   // unoccupied part of the generation (if any, making use
 318   // of that generation's prev_used_region to determine that
 319   // region). No need to do anything for the youngest


< prev index next >