
src/share/vm/gc/shared/cardTableModRefBS.cpp

rev 10742 : Make fields used in lock-free algorithms volatile
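
The change volatile-qualifies the card table's byte map and the local pointers into it, so that the lock-free accesses mutator and GC threads make to card entries compile to real loads and stores. A minimal sketch of the idea, using a stand-in jbyte typedef and a hypothetical dirty_card_sketch value (the real card constants are members of CardTableModRefBS):

    #include <cstddef>

    typedef signed char jbyte;            // stand-in for HotSpot's jbyte

    const jbyte dirty_card_sketch = 0;    // hypothetical card value

    struct CardTableSketch {
      volatile jbyte* _byte_map;          // shared; written lock-free by many threads

      // Each store through the volatile pointer is a real memory access:
      // the compiler may not cache it in a register, elide it, or merge it
      // with a neighbouring store to the same card.
      void mark_dirty(std::size_t idx) {
        _byte_map[idx] = dirty_card_sketch;
      }
    };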

*** 102,127 ****
    // The assembler store_check code will do an unsigned shift of the oop,
    // then add it to byte_map_base, i.e.
    //
    //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
!   _byte_map = (jbyte*) heap_rs.base();
    byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
    assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
    assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

!   jbyte* guard_card = &_byte_map[_guard_index];
    uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
    _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
    os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                              !ExecMem, "card table last card");
    *guard_card = last_card;

    log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
    log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
!                          p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
!   log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i(byte_map_base));
  }

  CardTableModRefBS::~CardTableModRefBS() {
    if (_covered) {
      delete[] _covered;
--- 102,127 ----
    // The assembler store_check code will do an unsigned shift of the oop,
    // then add it to byte_map_base, i.e.
    //
    //   _byte_map = byte_map_base + (uintptr_t(low_bound) >> card_shift)
!   _byte_map = (volatile jbyte*) heap_rs.base();
    byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
    assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
    assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");

!   volatile jbyte* guard_card = &_byte_map[_guard_index];
    uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
    _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
    os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                              !ExecMem, "card table last card");
    *guard_card = last_card;

    log_trace(gc, barrier)("CardTableModRefBS::CardTableModRefBS: ");
    log_trace(gc, barrier)("    &_byte_map[0]: " INTPTR_FORMAT "  &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
!                          p2i((jbyte*)&_byte_map[0]), p2i((jbyte*)&_byte_map[_last_valid_index]));
!   log_trace(gc, barrier)("    byte_map_base: " INTPTR_FORMAT, p2i((jbyte*)byte_map_base));
  }

  CardTableModRefBS::~CardTableModRefBS() {
    if (_covered) {
      delete[] _covered;
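
A side effect visible throughout this hunk: p2i() (assumed here to take a const void*, as its uses suggest) no longer accepts the byte-map pointers directly, because a volatile-qualified pointer does not convert implicitly to a pointer type that lacks the qualifier. A minimal sketch of why the explicit (jbyte*) casts are needed, with p2i_sketch as a hypothetical stand-in for the real p2i:

    #include <cstdint>

    typedef signed char jbyte;

    // Hypothetical stand-in for the real p2i(), assumed to take const void*.
    inline std::intptr_t p2i_sketch(const void* p) {
      return reinterpret_cast<std::intptr_t>(p);
    }

    int main() {
      volatile jbyte map[4] = {};
      volatile jbyte* byte_map = map;
      // p2i_sketch(byte_map);            // ill-formed: the implicit conversion
      //                                  // would drop the volatile qualifier
      std::intptr_t v = p2i_sketch((jbyte*)byte_map);  // explicit cast, as in the patch
      (void)v;
      return 0;
    }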
*** 149,159 ****
    }
    int res = i;
    _cur_covered_regions++;
    _covered[res].set_start(base);
    _covered[res].set_word_size(0);
!   jbyte* ct_start = byte_for(base);
    uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
    _committed[res].set_start((HeapWord*)ct_start_aligned);
    _committed[res].set_word_size(0);
    return res;
  }
--- 149,159 ----
    }
    int res = i;
    _cur_covered_regions++;
    _covered[res].set_start(base);
    _covered[res].set_word_size(0);
!   volatile jbyte* ct_start = byte_for(base);
    uintptr_t ct_start_aligned = align_size_down((uintptr_t)ct_start, _page_size);
    _committed[res].set_start((HeapWord*)ct_start_aligned);
    _committed[res].set_word_size(0);
    return res;
  }
*** 209,219 ****
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
!   jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
--- 209,219 ----
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
    // Align the end up to a page size (starts are already aligned).
!   volatile jbyte* const new_end = byte_after(new_region.last());
    HeapWord* new_end_aligned =
      (HeapWord*) align_size_up((uintptr_t)new_end, _page_size);
    assert(new_end_aligned >= (HeapWord*) new_end,
           "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
*** 300,347 ****
  #ifdef ASSERT
      // Check that the last card in the new region is committed according
      // to the tables.
      bool covered = false;
      for (int cr = 0; cr < _cur_covered_regions; cr++) {
!       if (_committed[cr].contains(new_end - 1)) {
          covered = true;
          break;
        }
      }
      assert(covered, "Card for end of new region not committed");
  #endif

      // The default of 0 is not necessarily clean cards.
!     jbyte* entry;
      if (old_region.last() < _whole_heap.start()) {
        entry = byte_for(_whole_heap.start());
      } else {
        entry = byte_after(old_region.last());
      }
      assert(index_for(new_region.last()) < _guard_index,
        "The guard card will be overwritten");
      // This line commented out cleans the newly expanded region and
      // not the aligned up expanded region.
!     // jbyte* const end = byte_after(new_region.last());
!     jbyte* const end = (jbyte*) new_end_for_commit;
      assert((end >= byte_after(new_region.last())) || collided || guarded,
        "Expect to be beyond new region unless impacting another region");
      // do nothing if we resized downward.
  #ifdef ASSERT
      for (int ri = 0; ri < _cur_covered_regions; ri++) {
        if (ri != ind) {
          // The end of the new committed region should not
          // be in any existing region unless it matches
          // the start of the next region.
!         assert(!_committed[ri].contains(end) ||
                 (_committed[ri].start() == (HeapWord*) end),
                 "Overlapping committed regions");
        }
      }
  #endif
      if (entry < end) {
!       memset(entry, clean_card, pointer_delta(end, entry, sizeof(jbyte)));
      }
    }
    // In any case, the covered size changes.
    _covered[ind].set_word_size(new_region.word_size());
--- 300,347 ----
  #ifdef ASSERT
      // Check that the last card in the new region is committed according
      // to the tables.
      bool covered = false;
      for (int cr = 0; cr < _cur_covered_regions; cr++) {
!       if (_committed[cr].contains((jbyte*)new_end - 1)) {
          covered = true;
          break;
        }
      }
      assert(covered, "Card for end of new region not committed");
  #endif

      // The default of 0 is not necessarily clean cards.
!     volatile jbyte* entry;
      if (old_region.last() < _whole_heap.start()) {
        entry = byte_for(_whole_heap.start());
      } else {
        entry = byte_after(old_region.last());
      }
      assert(index_for(new_region.last()) < _guard_index,
        "The guard card will be overwritten");
      // This line commented out cleans the newly expanded region and
      // not the aligned up expanded region.
!     // volatile jbyte* const end = byte_after(new_region.last());
!     volatile jbyte* const end = (volatile jbyte*) new_end_for_commit;
      assert((end >= byte_after(new_region.last())) || collided || guarded,
        "Expect to be beyond new region unless impacting another region");
      // do nothing if we resized downward.
  #ifdef ASSERT
      for (int ri = 0; ri < _cur_covered_regions; ri++) {
        if (ri != ind) {
          // The end of the new committed region should not
          // be in any existing region unless it matches
          // the start of the next region.
!         assert(!_committed[ri].contains((jbyte*)end) ||
                 (_committed[ri].start() == (HeapWord*) end),
                 "Overlapping committed regions");
        }
      }
  #endif
      if (entry < end) {
!       memset((void*)entry, clean_card, pointer_delta((void*)end, (void*)entry, sizeof(jbyte)));
      }
    }
    // In any case, the covered size changes.
    _covered[ind].set_word_size(new_region.word_size());
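
The same conversion rule drives the memset() and pointer_delta() casts in this hunk: both take non-volatile pointer arguments. Casting the qualifier away at these call sites is well defined because the mapped memory itself was never declared volatile; only the usual access path through the byte map is. A minimal sketch of the pattern, with clean_card_sketch as a hypothetical card value:

    #include <cstring>
    #include <cstddef>

    typedef signed char jbyte;

    const jbyte clean_card_sketch = 1;    // hypothetical card value

    // Clear a span of cards reached through volatile pointers. The casts
    // mirror the patch: memset() wants a plain void*, and pointer
    // subtraction gives the element count for jbyte spans.
    void clear_cards_sketch(volatile jbyte* cur, volatile jbyte* last) {
      if (cur < last) {
        std::memset((void*)cur, clean_card_sketch, (std::size_t)(last - cur));
      }
    }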
*** 349,365 ****
    log_trace(gc, barrier)("  _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
    log_trace(gc, barrier)("  _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
    log_trace(gc, barrier)("  byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
!                          p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
    log_trace(gc, barrier)("  addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
!                          p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));

    // Touch the last card of the covered region to show that it
    // is committed (or SEGV).
!   debug_only((void) (*byte_for(_covered[ind].last()));)
    debug_only(verify_guard();)
  }

  // Note that these versions are precise!  The scanning code has to handle the
  // fact that the write barrier may be either precise or imprecise.
--- 349,365 ----
    log_trace(gc, barrier)("  _covered[%d].start(): " INTPTR_FORMAT "  _covered[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
    log_trace(gc, barrier)("  _committed[%d].start(): " INTPTR_FORMAT "  _committed[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
    log_trace(gc, barrier)("  byte_for(start): " INTPTR_FORMAT "  byte_for(last): " INTPTR_FORMAT,
!                          p2i((jbyte*)byte_for(_covered[ind].start())), p2i((jbyte*)byte_for(_covered[ind].last())));
    log_trace(gc, barrier)("  addr_for(start): " INTPTR_FORMAT "  addr_for(last): " INTPTR_FORMAT,
!                          p2i(addr_for((volatile jbyte*) _committed[ind].start())), p2i(addr_for((volatile jbyte*) _committed[ind].last())));

    // Touch the last card of the covered region to show that it
    // is committed (or SEGV).
!   debug_only(jbyte last_covered_card = *byte_for(_covered[ind].last());)
    debug_only(verify_guard();)
  }

  // Note that these versions are precise!  The scanning code has to handle the
  // fact that the write barrier may be either precise or imprecise.
--- 349,365 ----
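
The debug-only touch of the last covered card changes from a discarded read to a read bound to a named local. Either form forces the load once the pointer is volatile-qualified; presumably binding the result to a local makes the forced read explicit and sidesteps warnings about a discarded expression. A minimal sketch of the forced-read idiom:

    typedef signed char jbyte;

    // Touch a card so an uncommitted page faults. The load must be kept
    // by the compiler because it goes through a volatile pointer.
    void touch_card_sketch(volatile jbyte* card) {
      jbyte value = *card;   // volatile load, emitted even though unused
      (void)value;           // suppress unused-variable warnings
    }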
*** 370,381 ****
  void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
    assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
    assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
!   jbyte* cur  = byte_for(mr.start());
!   jbyte* last = byte_after(mr.last());
    while (cur < last) {
      *cur = dirty_card;
      cur++;
    }
  }
--- 370,381 ----
  void CardTableModRefBS::dirty_MemRegion(MemRegion mr) {
    assert((HeapWord*)align_size_down((uintptr_t)mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
    assert((HeapWord*)align_size_up  ((uintptr_t)mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end"  );
!   volatile jbyte* cur  = byte_for(mr.start());
!   volatile jbyte* last = byte_after(mr.last());
    while (cur < last) {
      *cur = dirty_card;
      cur++;
    }
  }
*** 390,431 ****
  }

  void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
    // Be conservative: only clean cards entirely contained within the
    // region.
!   jbyte* cur;
    if (mr.start() == _whole_heap.start()) {
      cur = byte_for(mr.start());
    } else {
      assert(mr.start() > _whole_heap.start(), "mr is not covered.");
      cur = byte_after(mr.start() - 1);
    }
!   jbyte* last = byte_after(mr.last());
!   memset(cur, clean_card, pointer_delta(last, cur, sizeof(jbyte)));
  }

  void CardTableModRefBS::clear(MemRegion mr) {
    for (int i = 0; i < _cur_covered_regions; i++) {
      MemRegion mri = mr.intersection(_covered[i]);
      if (!mri.is_empty()) clear_MemRegion(mri);
    }
  }

  void CardTableModRefBS::dirty(MemRegion mr) {
!   jbyte* first = byte_for(mr.start());
!   jbyte* last  = byte_after(mr.last());
!   memset(first, dirty_card, last-first);
  }

  // Unlike several other card table methods, dirty_card_iterate()
  // iterates over dirty cards ranges in increasing address order.
  void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                             MemRegionClosure* cl) {
    for (int i = 0; i < _cur_covered_regions; i++) {
      MemRegion mri = mr.intersection(_covered[i]);
      if (!mri.is_empty()) {
!       jbyte *cur_entry, *next_entry, *limit;
        for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
             cur_entry <= limit;
             cur_entry = next_entry) {
          next_entry = cur_entry + 1;
          if (*cur_entry == dirty_card) {
--- 390,431 ----
  }

  void CardTableModRefBS::clear_MemRegion(MemRegion mr) {
    // Be conservative: only clean cards entirely contained within the
    // region.
!   volatile jbyte* cur;
    if (mr.start() == _whole_heap.start()) {
      cur = byte_for(mr.start());
    } else {
      assert(mr.start() > _whole_heap.start(), "mr is not covered.");
      cur = byte_after(mr.start() - 1);
    }
!   volatile jbyte* last = byte_after(mr.last());
!   memset((void*)cur, clean_card, pointer_delta((void*)last, (void*)cur, sizeof(jbyte)));
  }

  void CardTableModRefBS::clear(MemRegion mr) {
    for (int i = 0; i < _cur_covered_regions; i++) {
      MemRegion mri = mr.intersection(_covered[i]);
      if (!mri.is_empty()) clear_MemRegion(mri);
    }
  }

  void CardTableModRefBS::dirty(MemRegion mr) {
!   volatile jbyte* first = byte_for(mr.start());
!   volatile jbyte* last  = byte_after(mr.last());
!   memset((void*)first, dirty_card, pointer_delta((void*)last, (void*)first, sizeof(jbyte)));
  }

  // Unlike several other card table methods, dirty_card_iterate()
  // iterates over dirty cards ranges in increasing address order.
  void CardTableModRefBS::dirty_card_iterate(MemRegion mr,
                                             MemRegionClosure* cl) {
    for (int i = 0; i < _cur_covered_regions; i++) {
      MemRegion mri = mr.intersection(_covered[i]);
      if (!mri.is_empty()) {
!       volatile jbyte *cur_entry, *next_entry, *limit;
        for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
             cur_entry <= limit;
             cur_entry = next_entry) {
          next_entry = cur_entry + 1;
          if (*cur_entry == dirty_card) {
*** 447,457 ****
                                                        bool reset,
                                                        int reset_val) {
    for (int i = 0; i < _cur_covered_regions; i++) {
      MemRegion mri = mr.intersection(_covered[i]);
      if (!mri.is_empty()) {
!       jbyte* cur_entry, *next_entry, *limit;
        for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
             cur_entry <= limit;
             cur_entry = next_entry) {
          next_entry = cur_entry + 1;
          if (*cur_entry == dirty_card) {
--- 447,457 ----
                                                        bool reset,
                                                        int reset_val) {
    for (int i = 0; i < _cur_covered_regions; i++) {
      MemRegion mri = mr.intersection(_covered[i]);
      if (!mri.is_empty()) {
!       volatile jbyte* cur_entry, *next_entry, *limit;
        for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
             cur_entry <= limit;
             cur_entry = next_entry) {
          next_entry = cur_entry + 1;
          if (*cur_entry == dirty_card) {
*** 490,513 ****
  }

  #ifndef PRODUCT
  void CardTableModRefBS::verify_region(MemRegion mr,
                                        jbyte val, bool val_equals) {
!   jbyte* start = byte_for(mr.start());
!   jbyte* end   = byte_for(mr.last());
    bool failures = false;
!   for (jbyte* curr = start; curr <= end; ++curr) {
      jbyte curr_val = *curr;
      bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
      if (failed) {
        if (!failures) {
!         log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i(start), p2i(end));
          log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
          failures = true;
        }
        log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
!                             p2i(curr), p2i(addr_for(curr)), p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), (int) curr_val);
      }
    }
    guarantee(!failures, "there should not have been any failures");
--- 490,513 ----
  }

  #ifndef PRODUCT
  void CardTableModRefBS::verify_region(MemRegion mr,
                                        jbyte val, bool val_equals) {
!   volatile jbyte* start = byte_for(mr.start());
!   volatile jbyte* end   = byte_for(mr.last());
    bool failures = false;
!   for (volatile jbyte* curr = start; curr <= end; ++curr) {
      jbyte curr_val = *curr;
      bool failed = (val_equals) ? (curr_val != val) : (curr_val == val);
      if (failed) {
        if (!failures) {
!         log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]", p2i((jbyte*)start), p2i((jbyte*)end));
          log_error(gc, verify)("==   %sexpecting value: %d", (val_equals) ? "" : "not ", val);
          failures = true;
        }
        log_error(gc, verify)("==   card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d",
!                             p2i((jbyte*)curr), p2i((jbyte*)addr_for(curr)), p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), (int) curr_val);
      }
    }
    guarantee(!failures, "there should not have been any failures");
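
verify_region() already copies each card into a plain jbyte local before testing it; with a volatile map this becomes the read-once-snapshot idiom common in lock-free code, so the comparison and the log output within one iteration see the same value even if another thread dirties the card concurrently. A minimal sketch:

    typedef signed char jbyte;

    // Read-once snapshot: copy the volatile card into a plain local, then
    // test the local. All later uses within the iteration see one value.
    bool card_has_value_sketch(volatile jbyte* card, jbyte expected) {
      jbyte snapshot = *card;      // single volatile load
      return snapshot == expected;
    }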
*** 522,529 ****
  }
  #endif

  void CardTableModRefBS::print_on(outputStream* st) const {
    st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
!                p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base));
  }
- 
--- 522,528 ----
  }
  #endif

  void CardTableModRefBS::print_on(outputStream* st) const {
    st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT,
!                p2i((jbyte*)_byte_map), p2i(((jbyte*)_byte_map) + _byte_map_size), p2i((jbyte*)byte_map_base));
  }