
src/share/vm/gc/parallel/cardTableExtension.cpp

rev 10742 : Make fields used in lock-free algorithms volatile
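
The change itself is mechanical: every local pointer into the card table goes from jbyte* to volatile jbyte*. The motivation is that GC worker threads and mutator write barriers update card-table bytes concurrently, with no lock held, so the scanning loops below must perform a fresh load of each card byte on every iteration. Marking the pointee volatile keeps the compiler from caching or hoisting those loads. A minimal standalone sketch of the pattern (the jbyte typedef and card encoding here are illustrative, not taken from this file):

    #include <stddef.h>

    typedef signed char jbyte;      // stands in for HotSpot's jbyte
    const jbyte clean_card = -1;    // illustrative encoding only

    static inline bool card_is_clean(jbyte v) { return v == clean_card; }

    // Without 'volatile', the compiler may load *p once and reuse the value
    // across iterations, so a concurrent store that dirties a card could be
    // missed. With 'volatile jbyte*', each iteration re-reads the card byte.
    size_t count_unclean(volatile jbyte* start, volatile jbyte* end) {
      size_t n = 0;
      for (volatile jbyte* p = start; p < end; p++) {
        if (!card_is_clean(*p)) n++;   // fresh load of *p every pass
      }
      return n;
    }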

*** 137,159 ****
    int dirty_card_count = 0;
  
    // It is a waste to get here if empty.
    assert(sp->bottom() < sp->top(), "Should not be called if empty");
  
    oop* sp_top = (oop*)space_top;
!   jbyte* start_card = byte_for(sp->bottom());
!   jbyte* end_card = byte_for(sp_top - 1) + 1;
    oop* last_scanned = NULL; // Prevent scanning objects more than once
    // The width of the stripe ssize*stripe_total must be
    // consistent with the number of stripes so that the complete slice
    // is covered.
    size_t slice_width = ssize * stripe_total;
!   for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
!     jbyte* worker_start_card = slice + stripe_number * ssize;
      if (worker_start_card >= end_card)
        return; // We're done.
  
!     jbyte* worker_end_card = worker_start_card + ssize;
      if (worker_end_card > end_card)
        worker_end_card = end_card;
  
      // We do not want to scan objects more than once. In order to accomplish
      // this, we assert that any object with an object head inside our 'slice'
--- 137,159 ----
    int dirty_card_count = 0;
  
    // It is a waste to get here if empty.
    assert(sp->bottom() < sp->top(), "Should not be called if empty");
  
    oop* sp_top = (oop*)space_top;
!   volatile jbyte* start_card = byte_for(sp->bottom());
!   volatile jbyte* end_card = byte_for(sp_top - 1) + 1;
    oop* last_scanned = NULL; // Prevent scanning objects more than once
    // The width of the stripe ssize*stripe_total must be
    // consistent with the number of stripes so that the complete slice
    // is covered.
    size_t slice_width = ssize * stripe_total;
!   for (volatile jbyte* slice = start_card; slice < end_card; slice += slice_width) {
!     volatile jbyte* worker_start_card = slice + stripe_number * ssize;
      if (worker_start_card >= end_card)
        return; // We're done.
  
!     volatile jbyte* worker_end_card = worker_start_card + ssize;
      if (worker_end_card > end_card)
        worker_end_card = end_card;
  
      // We do not want to scan objects more than once. In order to accomplish
      // this, we assert that any object with an object head inside our 'slice'
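
For reference, the slice/stripe arithmetic in the hunk above: slice_width = ssize * stripe_total cards form one full round of stripes, and worker stripe_number owns the stripe at offset stripe_number * ssize inside every round. A reduced sketch with plain indices (all names and numbers are illustrative, not from the patch):

    #include <stdio.h>

    // Each worker visits every stripe_total-th run of ssize cards, clipped
    // at num_cards, mirroring the loop structure in the hunk above.
    void print_worker_stripes(size_t num_cards, size_t ssize,
                              unsigned stripe_number, unsigned stripe_total) {
      size_t slice_width = ssize * stripe_total;       // one round of stripes
      for (size_t slice = 0; slice < num_cards; slice += slice_width) {
        size_t start = slice + stripe_number * ssize;  // this worker's stripe
        if (start >= num_cards) return;                // we're done
        size_t end = start + ssize;
        if (end > num_cards) end = num_cards;          // clip the last stripe
        printf("worker %u scans cards [%zu, %zu)\n", stripe_number, start, end);
      }
    }

For example, with num_cards = 1000, ssize = 128 and stripe_total = 4, worker 1 scans [128, 256) and [640, 768); together the four workers cover every card exactly once.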
*** 206,222 ****
      // Note that worker_start_card >= worker_end_card is legal, and happens when
      // an object spans an entire slice.
      assert(worker_start_card <= end_card, "worker start card beyond end card");
      assert(worker_end_card <= end_card, "worker end card beyond end card");
  
!     jbyte* current_card = worker_start_card;
      while (current_card < worker_end_card) {
        // Find an unclean card.
        while (current_card < worker_end_card && card_is_clean(*current_card)) {
          current_card++;
        }
!       jbyte* first_unclean_card = current_card;
  
        // Find the end of a run of contiguous unclean cards
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          while (current_card < worker_end_card && !card_is_clean(*current_card)) {
            current_card++;
--- 206,222 ----
      // Note that worker_start_card >= worker_end_card is legal, and happens when
      // an object spans an entire slice.
      assert(worker_start_card <= end_card, "worker start card beyond end card");
      assert(worker_end_card <= end_card, "worker end card beyond end card");
  
!     volatile jbyte* current_card = worker_start_card;
      while (current_card < worker_end_card) {
        // Find an unclean card.
        while (current_card < worker_end_card && card_is_clean(*current_card)) {
          current_card++;
        }
!       volatile jbyte* first_unclean_card = current_card;
  
        // Find the end of a run of contiguous unclean cards
        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
          while (current_card < worker_end_card && !card_is_clean(*current_card)) {
            current_card++;
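
The loop in this hunk partitions the worker's cards into maximal runs: skip clean cards, then swallow contiguous unclean ones, producing first_unclean_card and, in the next hunk, following_clean_card. A reduced, self-contained sketch of that run-finding, omitting the object-spanning rescan the real code layers on top (names and the card encoding are illustrative):

    typedef signed char jbyte;
    const jbyte clean_card = -1;   // illustrative encoding only

    static inline bool card_is_clean(jbyte v) { return v == clean_card; }

    // Report each maximal run of unclean cards in [start, end) as a
    // [first, following) pair, mirroring first_unclean_card /
    // following_clean_card in the hunks above and below.
    template <typename ReportFn>
    void for_each_dirty_run(volatile jbyte* start, volatile jbyte* end,
                            ReportFn report) {
      volatile jbyte* cur = start;
      while (cur < end) {
        while (cur < end && card_is_clean(*cur)) cur++;    // find unclean
        volatile jbyte* first_unclean = cur;
        while (cur < end && !card_is_clean(*cur)) cur++;   // end of the run
        if (first_unclean < cur) report(first_unclean, cur);
      }
    }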
*** 229,248 ****
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
!         jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
!     jbyte* following_clean_card = current_card;
  
      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
--- 229,248 ----
          // prevents the redundant object scan, but it does not prevent newly
          // marked cards from being cleaned.
          HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
!         volatile jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
          if (ending_card_of_last_object > current_card) {
            // This means the object spans the next complete card.
            // We need to bump the current_card to ending_card_of_last_object
            current_card = ending_card_of_last_object;
          }
        }
      }
!     volatile jbyte* following_clean_card = current_card;
  
      if (first_unclean_card < worker_end_card) {
        oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card));
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
        // "p" should always be >= "last_scanned" because newly GC dirtied
*** 344,365 ****
  
  void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
    CardTableExtension* card_table =
      barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
  
!   jbyte* bot = card_table->byte_for(mr.start());
!   jbyte* top = card_table->byte_for(mr.end());
    while(bot <= top) {
      assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
      if (*bot == verify_card)
        *bot = youngergen_card;
      bot++;
    }
  }
  
  bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
!   jbyte* p = byte_for(addr);
    jbyte val = *p;
  
    if (card_is_dirty(val))
      return true;
--- 344,365 ----
  
  void CardTableExtension::verify_all_young_refs_precise_helper(MemRegion mr) {
    CardTableExtension* card_table =
      barrier_set_cast<CardTableExtension>(ParallelScavengeHeap::heap()->barrier_set());
  
!   volatile jbyte* bot = card_table->byte_for(mr.start());
!   volatile jbyte* top = card_table->byte_for(mr.end());
    while(bot <= top) {
      assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
      if (*bot == verify_card)
        *bot = youngergen_card;
      bot++;
    }
  }
  
  bool CardTableExtension::addr_is_marked_imprecise(void *addr) {
!   volatile jbyte* p = byte_for(addr);
    jbyte val = *p;
  
    if (card_is_dirty(val))
      return true;
*** 374,384 ****
  
    return false;
  }
  
  // Also includes verify_card
  bool CardTableExtension::addr_is_marked_precise(void *addr) {
!   jbyte* p = byte_for(addr);
    jbyte val = *p;
  
    if (card_is_newgen(val))
      return true;
--- 374,384 ----
  
    return false;
  }
  
  // Also includes verify_card
  bool CardTableExtension::addr_is_marked_precise(void *addr) {
!   volatile jbyte* p = byte_for(addr);
    jbyte val = *p;
  
    if (card_is_newgen(val))
      return true;
*** 474,486 ****
    log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
    log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
    log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
!                          p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
    log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
!                          p2i(addr_for((jbyte*) _committed[ind].start())), p2i(addr_for((jbyte*) _committed[ind].last())));
  
    debug_only(verify_guard();)
  }
  
  bool CardTableExtension::resize_commit_uncommit(int changed_region,
--- 474,486 ----
    log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
    log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
                           ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
    log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
!                          p2i((jbyte*)byte_for(_covered[ind].start())), p2i((jbyte*)byte_for(_covered[ind].last())));
    log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
!                          p2i(addr_for((volatile jbyte*) _committed[ind].start())), p2i(addr_for((volatile jbyte*) _committed[ind].last())));
  
    debug_only(verify_guard();)
  }
  
  bool CardTableExtension::resize_commit_uncommit(int changed_region,
*** 508,518 ****
           (HeapWord*) align_size_up((uintptr_t) cur_committed.start(), os::vm_page_size()),
           "Starts should have proper alignment");
  #endif
  
!   jbyte* new_start = byte_for(new_region.start());
    // Round down because this is for the start address
    HeapWord* new_start_aligned =
      (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  
    // The guard page is always committed and should not be committed over.
    // This method is used in cases where the generation is growing toward
--- 508,518 ----
           (HeapWord*) align_size_up((uintptr_t) cur_committed.start(), os::vm_page_size()),
           "Starts should have proper alignment");
  #endif
  
!   volatile jbyte* new_start = byte_for(new_region.start());
    // Round down because this is for the start address
    HeapWord* new_start_aligned =
      (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  
    // The guard page is always committed and should not be committed over.
    // This method is used in cases where the generation is growing toward
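
The align_size_down call in this hunk rounds the card address for new_start down to a page boundary before committing memory over it; for a power-of-two page size that is a simple mask. A one-line worked example under an assumed 4K page size (align_down here is a stand-in for HotSpot's align_size_down):

    #include <stdint.h>
    #include <assert.h>

    // Round 'value' down to a multiple of 'page_size' (power of two assumed).
    static inline uintptr_t align_down(uintptr_t value, uintptr_t page_size) {
      return value & ~(page_size - 1);
    }

    int main() {
      assert(align_down(0x12345, 0x1000) == 0x12000);  // 4K pages assumed
      return 0;
    }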
*** 581,591 ****
  }
  
  void CardTableExtension::resize_update_committed_table(int changed_region,
                                                         MemRegion new_region) {
  
!   jbyte* new_start = byte_for(new_region.start());
    // Set the new start of the committed region
    HeapWord* new_start_aligned =
      (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  
    MemRegion new_committed = MemRegion(new_start_aligned,
--- 581,591 ----
  }
  
  void CardTableExtension::resize_update_committed_table(int changed_region,
                                                         MemRegion new_region) {
  
!   volatile jbyte* new_start = byte_for(new_region.start());
    // Set the new start of the committed region
    HeapWord* new_start_aligned =
      (HeapWord*)align_size_down((uintptr_t)new_start, os::vm_page_size());
  
    MemRegion new_committed = MemRegion(new_start_aligned,
*** 598,614 ****
                                                        MemRegion new_region) {
    debug_only(verify_guard();)
  
    MemRegion original_covered = _covered[changed_region];
    // Initialize the card entries. Only consider the
    // region covered by the card table (_whole_heap)
!   jbyte* entry;
    if (new_region.start() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_for(new_region.start());
    }
!   jbyte* end = byte_for(original_covered.start());
    // If _whole_heap starts at the original covered regions start,
    // this loop will not execute.
    while (entry < end) {
      *entry++ = clean_card;
    }
  }
--- 598,614 ----
                                                        MemRegion new_region) {
    debug_only(verify_guard();)
  
    MemRegion original_covered = _covered[changed_region];
    // Initialize the card entries. Only consider the
    // region covered by the card table (_whole_heap)
!   volatile jbyte* entry;
    if (new_region.start() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_for(new_region.start());
    }
!   volatile jbyte* end = byte_for(original_covered.start());
    // If _whole_heap starts at the original covered regions start,
    // this loop will not execute.
    while (entry < end) {
      *entry++ = clean_card;
    }
  }