
src/share/vm/gc/shared/cardTableRS.cpp

rev 10742 : Make fields used in lock-free algorithms volatile
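The rationale for the volatile qualifiers: card-table entries are re-read
inside lock-free retry loops, and without volatile a conforming C++ compiler
may keep *entry cached in a register across iterations, so the loop never
observes a concurrent GC write barrier's store. A minimal sketch of the
pattern (illustrative names only; the GCC/Clang __sync builtin stands in for
HotSpot's Atomic::cmpxchg):

    #include <stdint.h>

    static const int8_t kClean = 1;   // stand-in for clean_card_val()

    // Clear one card entry, retrying if a concurrent write barrier races us.
    // 'entry' must be volatile so each iteration re-loads the byte from
    // memory instead of reusing a register-cached value.
    static bool clear_entry(volatile int8_t* entry) {
      while (true) {
        int8_t observed = *entry;              // fresh load every iteration
        if (observed == kClean) {
          return false;                        // nothing to do
        }
        // Builtin CAS; HotSpot uses Atomic::cmpxchg here.
        int8_t res = __sync_val_compare_and_swap(entry, observed, kClean);
        if (res == observed) {
          return true;                         // we cleaned the card
        }
        // CAS failed: a write barrier changed the entry between our load
        // and the CAS; loop and re-examine the fresh value.
      }
    }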


 135     // Find a parallel value to be used next.
 136     jbyte next_val = find_unused_youngergenP_card_value();
 137     set_cur_youngergen_card_val(next_val);
 138 
 139   } else {
 140     // In a sequential traversal we will always write youngergen, so that
 141     // the inline barrier is correct.
 142     set_cur_youngergen_card_val(youngergen_card);
 143   }
 144 }
 145 
 146 void CardTableRS::younger_refs_iterate(Generation* g,
 147                                        OopsInGenClosure* blk,
 148                                        uint n_threads) {
 149   // The indexing in this array is slightly odd. We want to access
 150   // the old generation record here, which is at index 2.
 151   _last_cur_val_in_gen[2] = cur_youngergen_card_val();
 152   g->younger_refs_iterate(blk, n_threads);
 153 }
 154 
 155 inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
 156   if (_is_par) {
 157     return clear_card_parallel(entry);
 158   } else {
 159     return clear_card_serial(entry);
 160   }
 161 }
 162 
 163 inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
 164   while (true) {
 165     // In the parallel case, we may have to do this several times.
 166     jbyte entry_val = *entry;
 167     assert(entry_val != CardTableRS::clean_card_val(),
 168            "We shouldn't be looking at clean cards, and this should "
 169            "be the only place they get cleaned.");
 170     if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
 171         || _ct->is_prev_youngergen_card_val(entry_val)) {
 172       jbyte res =
 173         Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
 174       if (res == entry_val) {
 175         break;
 176       } else {
 177         assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
 178                "The CAS above should only fail if another thread did "
 179                "a GC write barrier.");
 180       }
 181     } else if (entry_val ==
 182                CardTableRS::cur_youngergen_and_prev_nonclean_card) {
 183       // Parallelism shouldn't matter in this case.  Only the thread
 184       // assigned to scan the card should change this value.
 185       *entry = _ct->cur_youngergen_card_val();
 186       break;
 187     } else {
 188       assert(entry_val == _ct->cur_youngergen_card_val(),
 189              "Should be the only possibility.");
 190       // In this case, the card was clean before, and became
 191       // cur_youngergen only because a promoted object was processed.
 192       // We don't have to look at the card.
 193       return false;
 194     }
 195   }
 196   return true;
 197 }
 198 
 199 
 200 inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
 201   jbyte entry_val = *entry;
 202   assert(entry_val != CardTableRS::clean_card_val(),
 203          "We shouldn't be looking at clean cards, and this should "
 204          "be the only place they get cleaned.");
 205   assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
 206          "This should not be possible in the sequential case.");
 207   *entry = CardTableRS::clean_card_val();
 208   return true;
 209 }
 210 
 211 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
 212   DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
 213     _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
 214 }
 215 
 216 bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
 217   return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
 218 }
 219 
 220 // The regions are visited in *decreasing* address order.
 221 // This order aids with imprecise card marking, where a dirty
 222 // card may cause scanning, and summarization marking, of objects
 223 // that extend onto subsequent cards.
 224 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
 225   assert(mr.word_size() > 0, "Error");
 226   assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
 227   // mr.end() need not be card aligned.
 228   jbyte* cur_entry = _ct->byte_for(mr.last());
 229   const jbyte* limit = _ct->byte_for(mr.start());
 230   HeapWord* end_of_non_clean = mr.end();
 231   HeapWord* start_of_non_clean = end_of_non_clean;
 232   while (cur_entry >= limit) {
 233     HeapWord* cur_hw = _ct->addr_for(cur_entry);
 234     if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
 235       // Continue the dirty range by opening the
 236       // dirty window one card to the left.
 237       start_of_non_clean = cur_hw;
 238     } else {
 239       // We hit a "clean" card; process any non-empty
 240       // "dirty" range accumulated so far.
 241       if (start_of_non_clean < end_of_non_clean) {
 242         const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 243         _dirty_card_closure->do_MemRegion(mrd);
 244       }
 245 
 246       // Fast forward through a contiguous whole-word run of clean cards beginning at a word boundary.
 247       if (is_word_aligned(cur_entry)) {
 248         jbyte* cur_row = cur_entry - BytesPerWord;
 249         while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
 250           cur_row -= BytesPerWord;
 251         }
 252         cur_entry = cur_row + BytesPerWord;
 253         cur_hw = _ct->addr_for(cur_entry);
 254       }
 255 
 256       // Reset the dirty window, while continuing to look
 257       // for the next dirty card that will start a
 258       // new dirty window.
 259       end_of_non_clean = cur_hw;
 260       start_of_non_clean = cur_hw;
 261     }
 262     // Note that "cur_entry" leads "start_of_non_clean" in
 263     // its leftward excursion after this point
 264     // in the loop and, when we hit the left end of "mr",
 265     // will point off of the left end of the card-table
 266     // for "mr".
 267     cur_entry--;
 268   }
 269   // If the first card of "mr" was dirty, we will have
 270   // been left with a dirty window, co-initial with "mr",
 271   // which we now process.
 272   if (start_of_non_clean < end_of_non_clean) {
 273     const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 274     _dirty_card_closure->do_MemRegion(mrd);
 275   }
 276 }
 277 
 278 // clean (by dirty->clean before) ==> cur_younger_gen
 279 // dirty                          ==> cur_youngergen_and_prev_nonclean_card
 280 // precleaned                     ==> cur_youngergen_and_prev_nonclean_card
 281 // prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
 282 // cur-younger-gen                ==> cur_younger_gen
 283 // cur_youngergen_and_prev_nonclean_card ==> no change.
 284 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
 285   jbyte* entry = _ct_bs->byte_for(field);
 286   do {
 287     jbyte entry_val = *entry;
 288     // We put this first because it's probably the most common case.
 289     if (entry_val == clean_card_val()) {
 290       // No threat of contention with cleaning threads.
 291       *entry = cur_youngergen_card_val();
 292       return;
 293     } else if (card_is_dirty_wrt_gen_iter(entry_val)
 294                || is_prev_youngergen_card_val(entry_val)) {
 295       // Mark it as both cur and prev youngergen; card cleaning thread will
 296       // eventually remove the previous stuff.
 297       jbyte new_val = cur_youngergen_and_prev_nonclean_card;
 298       jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
 299       // Did the CAS succeed?
 300       if (res == entry_val) return;
 301       // Otherwise, retry, to see the new value.
 302       continue;
 303     } else {
 304       assert(entry_val == cur_youngergen_and_prev_nonclean_card
 305              || entry_val == cur_youngergen_card_val(),


 420 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
 421   CardTableRS* _ct;
 422 public:
 423   VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
 424   void do_generation(Generation* gen) {
 425     // Skip the youngest generation.
 426     if (GenCollectedHeap::heap()->is_young_gen(gen)) {
 427       return;
 428     }
 429     // Normally, we're interested in pointers to younger generations.
 430     VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
 431     gen->space_iterate(&blk, true);
 432   }
 433 };
 434 
 435 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
 436   // We don't need to do young-gen spaces.
 437   if (s->end() <= gen_boundary) return;
 438   MemRegion used = s->used_region();
 439 
 440   jbyte* cur_entry = byte_for(used.start());
 441   jbyte* limit = byte_after(used.last());
 442   while (cur_entry < limit) {
 443     if (*cur_entry == clean_card_val()) {
 444       jbyte* first_dirty = cur_entry+1;
 445       while (first_dirty < limit &&
 446              *first_dirty == clean_card_val()) {
 447         first_dirty++;
 448       }
 449       // If the first object is a regular object, and it has a
 450       // young-to-old field, that would mark the previous card.
 451       HeapWord* boundary = addr_for(cur_entry);
 452       HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
 453       HeapWord* boundary_block = s->block_start(boundary);
 454       HeapWord* begin = boundary;             // Until proven otherwise.
 455       HeapWord* start_block = boundary_block; // Until proven otherwise.
 456       if (boundary_block < boundary) {
 457         if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
 458           oop boundary_obj = oop(boundary_block);
 459           if (!boundary_obj->is_objArray() &&
 460               !boundary_obj->is_typeArray()) {
 461             guarantee(cur_entry > byte_for(used.start()),
 462                       "else boundary would be boundary_block");
 463             if (*byte_for(boundary_block) != clean_card_val()) {
 464               begin = boundary_block + s->block_size(boundary_block);



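For reference while reading write_ref_field_gc_par below, the transition
table in its header comment amounts to a small state machine over card
values. A single-threaded model of just the transitions (hypothetical names,
not HotSpot's; the real code applies the dirty/precleaned/prev-younger-gen
transition with Atomic::cmpxchg and retries on failure):

    // Hypothetical card states mirroring the comment table above
    // write_ref_field_gc_par.
    enum CardState {
      kClean,
      kDirty,
      kPrecleaned,
      kPrevYoungerGen,
      kCurYoungerGen,
      kCurYoungerGenAndPrevNonclean
    };

    CardState barrier_transition(CardState s) {
      switch (s) {
        case kClean:
          return kCurYoungerGen;                // no cleaning race possible
        case kDirty:
        case kPrecleaned:
        case kPrevYoungerGen:
          return kCurYoungerGenAndPrevNonclean; // keep the prev-nonclean info
        case kCurYoungerGen:
        case kCurYoungerGenAndPrevNonclean:
          return s;                             // already recorded; no change
      }
      return s;                                 // unreachable
    }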

 135     // Find a parallel value to be used next.
 136     jbyte next_val = find_unused_youngergenP_card_value();
 137     set_cur_youngergen_card_val(next_val);
 138 
 139   } else {
 140     // In a sequential traversal we will always write youngergen, so that
 141     // the inline barrier is correct.
 142     set_cur_youngergen_card_val(youngergen_card);
 143   }
 144 }
 145 
 146 void CardTableRS::younger_refs_iterate(Generation* g,
 147                                        OopsInGenClosure* blk,
 148                                        uint n_threads) {
 149   // The indexing in this array is slightly odd. We want to access
 150   // the old generation record here, which is at index 2.
 151   _last_cur_val_in_gen[2] = cur_youngergen_card_val();
 152   g->younger_refs_iterate(blk, n_threads);
 153 }
 154 
 155 inline bool ClearNoncleanCardWrapper::clear_card(volatile jbyte* entry) {
 156   if (_is_par) {
 157     return clear_card_parallel(entry);
 158   } else {
 159     return clear_card_serial(entry);
 160   }
 161 }
 162 
 163 inline bool ClearNoncleanCardWrapper::clear_card_parallel(volatile jbyte* entry) {
 164   while (true) {
 165     // In the parallel case, we may have to do this several times.
 166     jbyte entry_val = *entry;
 167     assert(entry_val != CardTableRS::clean_card_val(),
 168            "We shouldn't be looking at clean cards, and this should "
 169            "be the only place they get cleaned.");
 170     if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
 171         || _ct->is_prev_youngergen_card_val(entry_val)) {
 172       jbyte res =
 173         Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
 174       if (res == entry_val) {
 175         break;
 176       } else {
 177         assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
 178                "The CAS above should only fail if another thread did "
 179                "a GC write barrier.");
 180       }
 181     } else if (entry_val ==
 182                CardTableRS::cur_youngergen_and_prev_nonclean_card) {
 183       // Parallelism shouldn't matter in this case.  Only the thread
 184       // assigned to scan the card should change this value.
 185       *entry = _ct->cur_youngergen_card_val();
 186       break;
 187     } else {
 188       assert(entry_val == _ct->cur_youngergen_card_val(),
 189              "Should be the only possibility.");
 190       // In this case, the card was clean before, and became
 191       // cur_youngergen only because a promoted object was processed.
 192       // We don't have to look at the card.
 193       return false;
 194     }
 195   }
 196   return true;
 197 }
 198 
 199 
 200 inline bool ClearNoncleanCardWrapper::clear_card_serial(volatile jbyte* entry) {
 201   jbyte entry_val = *entry;
 202   assert(entry_val != CardTableRS::clean_card_val(),
 203          "We shouldn't be looking at clean cards, and this should "
 204          "be the only place they get cleaned.");
 205   assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
 206          "This should not be possible in the sequential case.");
 207   *entry = CardTableRS::clean_card_val();
 208   return true;
 209 }
 210 
 211 ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
 212   DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
 213     _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
 214 }
 215 
 216 bool ClearNoncleanCardWrapper::is_word_aligned(volatile jbyte* entry) {
 217   return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
 218 }
 219 
 220 // The regions are visited in *decreasing* address order.
 221 // This order aids with imprecise card marking, where a dirty
 222 // card may cause scanning, and summarization marking, of objects
 223 // that extend onto subsequent cards.
 224 void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
 225   assert(mr.word_size() > 0, "Error");
 226   assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
 227   // mr.end() need not be card aligned.
 228   volatile jbyte* cur_entry = _ct->byte_for(mr.last());
 229   const volatile jbyte* limit = _ct->byte_for(mr.start());
 230   HeapWord* end_of_non_clean = mr.end();
 231   HeapWord* start_of_non_clean = end_of_non_clean;
 232   while (cur_entry >= limit) {
 233     HeapWord* cur_hw = _ct->addr_for(cur_entry);
 234     if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
 235       // Continue the dirty range by opening the
 236       // dirty window one card to the left.
 237       start_of_non_clean = cur_hw;
 238     } else {
 239       // We hit a "clean" card; process any non-empty
 240       // "dirty" range accumulated so far.
 241       if (start_of_non_clean < end_of_non_clean) {
 242         const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 243         _dirty_card_closure->do_MemRegion(mrd);
 244       }
 245 
 246       // Fast forward through a contiguous whole-word run of clean cards beginning at a word boundary.
 247       if (is_word_aligned(cur_entry)) {
 248         volatile jbyte* cur_row = cur_entry - BytesPerWord;
 249         while (cur_row >= limit && *((volatile intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
 250           cur_row -= BytesPerWord;
 251         }
 252         cur_entry = cur_row + BytesPerWord;
 253         cur_hw = _ct->addr_for(cur_entry);
 254       }
 255 
 256       // Reset the dirty window, while continuing to look
 257       // for the next dirty card that will start a
 258       // new dirty window.
 259       end_of_non_clean = cur_hw;
 260       start_of_non_clean = cur_hw;
 261     }
 262     // Note that "cur_entry" leads "start_of_non_clean" in
 263     // its leftward excursion after this point
 264     // in the loop and, when we hit the left end of "mr",
 265     // will point off of the left end of the card-table
 266     // for "mr".
 267     cur_entry--;
 268   }
 269   // If the first card of "mr" was dirty, we will have
 270   // been left with a dirty window, co-initial with "mr",
 271   // which we now process.
 272   if (start_of_non_clean < end_of_non_clean) {
 273     const MemRegion mrd(start_of_non_clean, end_of_non_clean);
 274     _dirty_card_closure->do_MemRegion(mrd);
 275   }
 276 }
 277 
 278 // clean (by dirty->clean before) ==> cur_younger_gen
 279 // dirty                          ==> cur_youngergen_and_prev_nonclean_card
 280 // precleaned                     ==> cur_youngergen_and_prev_nonclean_card
 281 // prev-younger-gen               ==> cur_youngergen_and_prev_nonclean_card
 282 // cur-younger-gen                ==> cur_younger_gen
 283 // cur_youngergen_and_prev_nonclean_card ==> no change.
 284 void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
 285   volatile jbyte* entry = _ct_bs->byte_for(field);
 286   do {
 287     jbyte entry_val = *entry;
 288     // We put this first because it's probably the most common case.
 289     if (entry_val == clean_card_val()) {
 290       // No threat of contention with cleaning threads.
 291       *entry = cur_youngergen_card_val();
 292       return;
 293     } else if (card_is_dirty_wrt_gen_iter(entry_val)
 294                || is_prev_youngergen_card_val(entry_val)) {
 295       // Mark it as both cur and prev youngergen; card cleaning thread will
 296       // eventually remove the previous stuff.
 297       jbyte new_val = cur_youngergen_and_prev_nonclean_card;
 298       jbyte res = Atomic::cmpxchg(new_val, entry, entry_val);
 299       // Did the CAS succeed?
 300       if (res == entry_val) return;
 301       // Otherwise, retry, to see the new value.
 302       continue;
 303     } else {
 304       assert(entry_val == cur_youngergen_and_prev_nonclean_card
 305              || entry_val == cur_youngergen_card_val(),


 420 class VerifyCTGenClosure: public GenCollectedHeap::GenClosure {
 421   CardTableRS* _ct;
 422 public:
 423   VerifyCTGenClosure(CardTableRS* ct) : _ct(ct) {}
 424   void do_generation(Generation* gen) {
 425     // Skip the youngest generation.
 426     if (GenCollectedHeap::heap()->is_young_gen(gen)) {
 427       return;
 428     }
 429     // Normally, we're interested in pointers to younger generations.
 430     VerifyCTSpaceClosure blk(_ct, gen->reserved().start());
 431     gen->space_iterate(&blk, true);
 432   }
 433 };
 434 
 435 void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
 436   // We don't need to do young-gen spaces.
 437   if (s->end() <= gen_boundary) return;
 438   MemRegion used = s->used_region();
 439 
 440   volatile jbyte* cur_entry = byte_for(used.start());
 441   volatile jbyte* limit = byte_after(used.last());
 442   while (cur_entry < limit) {
 443     if (*cur_entry == clean_card_val()) {
 444       volatile jbyte* first_dirty = cur_entry+1;
 445       while (first_dirty < limit &&
 446              *first_dirty == clean_card_val()) {
 447         first_dirty++;
 448       }
 449       // If the first object is a regular object, and it has a
 450       // young-to-old field, that would mark the previous card.
 451       HeapWord* boundary = addr_for(cur_entry);
 452       HeapWord* end = (first_dirty >= limit) ? used.end() : addr_for(first_dirty);
 453       HeapWord* boundary_block = s->block_start(boundary);
 454       HeapWord* begin = boundary;             // Until proven otherwise.
 455       HeapWord* start_block = boundary_block; // Until proven otherwise.
 456       if (boundary_block < boundary) {
 457         if (s->block_is_obj(boundary_block) && s->obj_is_alive(boundary_block)) {
 458           oop boundary_obj = oop(boundary_block);
 459           if (!boundary_obj->is_objArray() &&
 460               !boundary_obj->is_typeArray()) {
 461             guarantee(cur_entry > byte_for(used.start()),
 462                       "else boundary would be boundary_block");
 463             if (*byte_for(boundary_block) != clean_card_val()) {
 464               begin = boundary_block + s->block_size(boundary_block);


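A note on the clean-card fast-forward in do_MemRegion above: the regions are
walked in decreasing address order, and once the scan sits on a word-aligned
entry it compares BytesPerWord card bytes at a time against a precomputed
all-clean word (clean_card_row()). A rough model of that idea (illustrative;
assumes an 8-byte word, and relies on a mixed word simply failing the
compare and falling back to the per-byte path):

    #include <stdint.h>

    static const int    kBytesPerWord = 8;  // assumption: 64-bit word
    static const int8_t kCleanVal     = 1;  // stand-in for clean_card_val()

    // A word whose every byte is the clean value, like clean_card_row().
    static intptr_t clean_card_row() {
      intptr_t row = 0;
      for (int i = 0; i < kBytesPerWord; i++) {
        row = (row << 8) | (uint8_t)kCleanVal;
      }
      return row;
    }

    // Scan backward from word-aligned 'entry' toward 'limit', skipping
    // whole words of clean cards; returns the lowest entry still worth
    // examining byte by byte.
    static volatile int8_t* skip_clean_words(volatile int8_t* entry,
                                             const volatile int8_t* limit) {
      const intptr_t clean_row = clean_card_row();
      volatile int8_t* row = entry - kBytesPerWord;
      // The cast keeps 'volatile' so each word is re-loaded from memory.
      while (row >= limit && *((volatile intptr_t*)row) == clean_row) {
        row -= kBytesPerWord;
      }
      return row + kBytesPerWord;
    }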