
src/share/vm/gc/shared/cardTableRS.cpp

rev 10742 : Make fields used in lock-free algorithms volatile

*** 150,168 ****
    // the old generation record here, which is at index 2.
    _last_cur_val_in_gen[2] = cur_youngergen_card_val();
    g->younger_refs_iterate(blk, n_threads);
  }
  
! inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
    if (_is_par) {
      return clear_card_parallel(entry);
    } else {
      return clear_card_serial(entry);
    }
  }
  
! inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
    while (true) {
      // In the parallel case, we may have to do this several times.
      jbyte entry_val = *entry;
      assert(entry_val != CardTableRS::clean_card_val(),
             "We shouldn't be looking at clean cards, and this should "
--- 150,168 ----
    // the old generation record here, which is at index 2.
    _last_cur_val_in_gen[2] = cur_youngergen_card_val();
    g->younger_refs_iterate(blk, n_threads);
  }
  
! inline bool ClearNoncleanCardWrapper::clear_card(volatile jbyte* entry) {
    if (_is_par) {
      return clear_card_parallel(entry);
    } else {
      return clear_card_serial(entry);
    }
  }
  
! inline bool ClearNoncleanCardWrapper::clear_card_parallel(volatile jbyte* entry) {
    while (true) {
      // In the parallel case, we may have to do this several times.
      jbyte entry_val = *entry;
      assert(entry_val != CardTableRS::clean_card_val(),
             "We shouldn't be looking at clean cards, and this should "
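
Reviewer note on why the entry pointer becomes volatile: clear_card_parallel re-reads the card byte on every pass of its retry loop while other threads may be updating the same byte through HotSpot's Atomic layer. The following standalone sketch shows that pattern outside HotSpot; clear_card_retry, cas_byte, the clean_card value, and the use of the GCC/Clang __sync builtin are illustrative assumptions, not the code in this file.

// Sketch only (not HotSpot code): why the card pointer must be volatile in a
// lock-free retry loop. cas_byte() stands in for an atomic compare-and-swap.
typedef signed char jbyte;

static const jbyte clean_card = -1;   // placeholder value

static jbyte cas_byte(volatile jbyte* dest, jbyte compare, jbyte exchange) {
  return __sync_val_compare_and_swap(dest, compare, exchange);
}

// With 'entry' declared volatile, the compiler must reload *entry on every
// iteration instead of caching the first value in a register, so a concurrent
// writer's update is eventually observed and the CAS can be retried correctly.
bool clear_card_retry(volatile jbyte* entry) {
  while (true) {
    jbyte observed = *entry;              // fresh read each iteration
    if (observed == clean_card) {
      return false;                       // already clean, nothing to do
    }
    if (cas_byte(entry, observed, clean_card) == observed) {
      return true;                        // this thread cleared the card
    }
    // CAS lost a race with another thread; loop and re-read the card.
  }
}
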
*** 195,205 ****
    }
    return true;
  }
  
  
! inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
    jbyte entry_val = *entry;
    assert(entry_val != CardTableRS::clean_card_val(),
           "We shouldn't be looking at clean cards, and this should "
           "be the only place they get cleaned.");
    assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
--- 195,205 ----
    }
    return true;
  }
  
  
! inline bool ClearNoncleanCardWrapper::clear_card_serial(volatile jbyte* entry) {
    jbyte entry_val = *entry;
    assert(entry_val != CardTableRS::clean_card_val(),
           "We shouldn't be looking at clean cards, and this should "
           "be the only place they get cleaned.");
    assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
*** 211,221 ****
  ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
    DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
      _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
  }
  
! bool ClearNoncleanCardWrapper::is_word_aligned(jbyte* entry) {
    return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
  }
  
  // The regions are visited in *decreasing* address order.
  // This order aids with imprecise card marking, where a dirty
--- 211,221 ----
  ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
    DirtyCardToOopClosure* dirty_card_closure, CardTableRS* ct, bool is_par) :
      _dirty_card_closure(dirty_card_closure), _ct(ct), _is_par(is_par) {
  }
  
! bool ClearNoncleanCardWrapper::is_word_aligned(volatile jbyte* entry) {
    return (((intptr_t)entry) & (BytesPerWord-1)) == 0;
  }
  
  // The regions are visited in *decreasing* address order.
  // This order aids with imprecise card marking, where a dirty
*** 223,234 ****
  // that extend onto subsequent cards.
  void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
    assert(mr.word_size() > 0, "Error");
    assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
    // mr.end() may not necessarily be card aligned.
!   jbyte* cur_entry = _ct->byte_for(mr.last());
!   const jbyte* limit = _ct->byte_for(mr.start());
    HeapWord* end_of_non_clean = mr.end();
    HeapWord* start_of_non_clean = end_of_non_clean;
    while (cur_entry >= limit) {
      HeapWord* cur_hw = _ct->addr_for(cur_entry);
      if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
--- 223,234 ----
  // that extend onto subsequent cards.
  void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
    assert(mr.word_size() > 0, "Error");
    assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
    // mr.end() may not necessarily be card aligned.
!   volatile jbyte* cur_entry = _ct->byte_for(mr.last());
!   const volatile jbyte* limit = _ct->byte_for(mr.start());
    HeapWord* end_of_non_clean = mr.end();
    HeapWord* start_of_non_clean = end_of_non_clean;
    while (cur_entry >= limit) {
      HeapWord* cur_hw = _ct->addr_for(cur_entry);
      if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
*** 243,253 ****
          _dirty_card_closure->do_MemRegion(mrd);
        }
  
        // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
        if (is_word_aligned(cur_entry)) {
!         jbyte* cur_row = cur_entry - BytesPerWord;
          while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
            cur_row -= BytesPerWord;
          }
          cur_entry = cur_row + BytesPerWord;
          cur_hw = _ct->addr_for(cur_entry);
--- 243,253 ----
          _dirty_card_closure->do_MemRegion(mrd);
        }
  
        // fast forward through potential continuous whole-word range of clean cards beginning at a word-boundary
        if (is_word_aligned(cur_entry)) {
!         volatile jbyte* cur_row = cur_entry - BytesPerWord;
          while (cur_row >= limit && *((intptr_t*)cur_row) == CardTableRS::clean_card_row()) {
            cur_row -= BytesPerWord;
          }
          cur_entry = cur_row + BytesPerWord;
          cur_hw = _ct->addr_for(cur_entry);
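
Reviewer note on the fast-forward branch above: it skips runs of clean cards one word at a time by comparing a whole word of card bytes against clean_card_row(). A rough standalone sketch of the same idea follows; skip_clean_words, clean_row(), BYTES_PER_WORD, and the clean_card value are placeholder names introduced here, and the word-wide load mirrors the *((intptr_t*)cur_row) comparison in the hunk.

// Sketch only (not HotSpot code): skipping whole words of clean card bytes.
#include <stdint.h>
#include <string.h>

typedef signed char jbyte;
static const jbyte clean_card = -1;             // placeholder value
static const size_t BYTES_PER_WORD = sizeof(intptr_t);

// A word whose bytes are all the clean-card value, used for word-at-a-time
// comparison (the role clean_card_row() plays in the hunk above).
static intptr_t clean_row() {
  intptr_t row;
  memset(&row, (unsigned char)clean_card, sizeof(row));
  return row;
}

// Starting from a word-aligned card entry, step backwards one word at a time
// while the whole word reads as clean; return the lowest entry proven clean.
const volatile jbyte* skip_clean_words(const volatile jbyte* entry,
                                       const volatile jbyte* limit) {
  const volatile jbyte* row = entry - BYTES_PER_WORD;
  while (row >= limit &&
         *(const volatile intptr_t*)row == clean_row()) {
    row -= BYTES_PER_WORD;
  }
  return row + BYTES_PER_WORD;
}
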
*** 280,290 ****
  // precleaned ==> cur_youngergen_and_prev_nonclean_card
  // prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
  // cur-younger-gen ==> cur_younger_gen
  // cur_youngergen_and_prev_nonclean_card ==> no change.
  void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
!   jbyte* entry = _ct_bs->byte_for(field);
    do {
      jbyte entry_val = *entry;
      // We put this first because it's probably the most common case.
      if (entry_val == clean_card_val()) {
        // No threat of contention with cleaning threads.
--- 280,290 ----
  // precleaned ==> cur_youngergen_and_prev_nonclean_card
  // prev-younger-gen ==> cur_youngergen_and_prev_nonclean_card
  // cur-younger-gen ==> cur_younger_gen
  // cur_youngergen_and_prev_nonclean_card ==> no change.
  void CardTableRS::write_ref_field_gc_par(void* field, oop new_val) {
!   volatile jbyte* entry = _ct_bs->byte_for(field);
    do {
      jbyte entry_val = *entry;
      // We put this first because it's probably the most common case.
      if (entry_val == clean_card_val()) {
        // No threat of contention with cleaning threads.
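
Reviewer note on the comment block above write_ref_field_gc_par: it describes a small state machine over card values (the first transitions of the comment are cut off by this hunk; the clean ==> cur_younger_gen case is handled by the clean_card_val() branch visible in the body). As a readability aid, here is that transition table written out as a plain function; next_card_val and the constants are made-up stand-ins, not the CardTableRS values.

// Sketch only (not HotSpot code): the card-value transitions described in the
// comment above, with placeholder constants.
typedef signed char jbyte;

static const jbyte clean_card                       = 0;  // placeholder
static const jbyte dirty_card                       = 1;  // placeholder
static const jbyte precleaned_card                  = 2;  // placeholder
static const jbyte prev_younger_gen_card            = 3;  // placeholder
static const jbyte cur_younger_gen_card             = 4;  // placeholder
static const jbyte cur_youngergen_and_prev_nonclean = 5;  // placeholder

// Value a card should end up with after the parallel GC write barrier hits it.
jbyte next_card_val(jbyte v) {
  if (v == clean_card)            return cur_younger_gen_card;
  if (v == dirty_card ||
      v == precleaned_card ||
      v == prev_younger_gen_card) return cur_youngergen_and_prev_nonclean;
  if (v == cur_younger_gen_card)  return cur_younger_gen_card;  // no change
  return v;  // cur_youngergen_and_prev_nonclean ==> no change
}
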
*** 435,449 ****
  void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
    // We don't need to do young-gen spaces.
    if (s->end() <= gen_boundary) return;
    MemRegion used = s->used_region();
  
!   jbyte* cur_entry = byte_for(used.start());
!   jbyte* limit = byte_after(used.last());
    while (cur_entry < limit) {
      if (*cur_entry == clean_card_val()) {
!       jbyte* first_dirty = cur_entry+1;
        while (first_dirty < limit &&
               *first_dirty == clean_card_val()) {
          first_dirty++;
        }
        // If the first object is a regular object, and it has a
--- 435,449 ----
  void CardTableRS::verify_space(Space* s, HeapWord* gen_boundary) {
    // We don't need to do young-gen spaces.
    if (s->end() <= gen_boundary) return;
    MemRegion used = s->used_region();
  
!   volatile jbyte* cur_entry = byte_for(used.start());
!   volatile jbyte* limit = byte_after(used.last());
    while (cur_entry < limit) {
      if (*cur_entry == clean_card_val()) {
!       volatile jbyte* first_dirty = cur_entry+1;
        while (first_dirty < limit &&
               *first_dirty == clean_card_val()) {
          first_dirty++;
        }
        // If the first object is a regular object, and it has a