src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 3900 : 8004816: G1: Kitchensink failures after marking stack changes
Summary: Reset the marking state, including the mark stack overflow flag, in the event of a marking stack overflow during serial reference processing.
Reviewed-by: jmasa
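
Below is a minimal standalone sketch (not HotSpot code; `MarkingState`, `finish_remark` and the fields are simplified stand-ins for ConcurrentMark's `_markStack`, `has_overflown()` and `_restart_for_overflow`) of the remark-time flow this change establishes: when serial reference processing overflows the global mark stack, the whole marking state, including the overflow flag, is reset before concurrent marking restarts, rather than only clearing the flag.

#include <cstdio>
#include <vector>

// Simplified stand-in for ConcurrentMark's global marking state.
struct MarkingState {
  std::vector<void*> mark_stack;      // stand-in for _markStack
  bool overflown = false;             // stand-in for the overflow flag
  bool restart_for_overflow = false;  // stand-in for _restart_for_overflow

  // Mirrors the idea of reset_marking_state(): empty the stack and
  // clear the overflow flag so a restarted cycle starts clean.
  void reset_marking_state() {
    mark_stack.clear();
    overflown = false;
  }

  // Called at the end of remark, after reference processing may have
  // overflowed the mark stack.
  void finish_remark() {
    if (overflown) {
      // Overflowed: request a restart and reset the full marking state,
      // instead of only clearing the flag (the pre-fix behaviour).
      restart_for_overflow = true;
      reset_marking_state();
    } else {
      // Marking completed normally; move to the non-marking state.
      reset_marking_state();
      restart_for_overflow = false;
    }
  }
};

int main() {
  MarkingState cm;
  cm.overflown = true;  // simulate a mark stack overflow during remark
  cm.finish_remark();
  std::printf("restart=%d overflown=%d stack=%zu\n",
              cm.restart_for_overflow, cm.overflown, cm.mark_stack.size());
  return 0;
}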

*** 190,199 ****
--- 190,200 ----
           "Didn't reserve backing store for all of ConcurrentMark stack?");
    _base = (oop*) _virtual_space.low();
    setEmpty();
    _capacity = (jint) capacity;
    _saved_index = -1;
+   _should_expand = false;
    NOT_PRODUCT(_max_depth = 0);
    return true;
  }
  
  void CMMarkStack::expand() {
*** 745,756 ****
    // Separated the asserts so that we know which one fires.
    assert(_heap_start != NULL, "heap bounds should look ok");
    assert(_heap_end != NULL, "heap bounds should look ok");
    assert(_heap_start < _heap_end, "heap bounds should look ok");
  
!   // reset all the marking data structures and any necessary flags
!   clear_marking_state();
  
    if (verbose_low()) {
      gclog_or_tty->print_cr("[global] resetting");
    }
--- 746,757 ----
    // Separated the asserts so that we know which one fires.
    assert(_heap_start != NULL, "heap bounds should look ok");
    assert(_heap_end != NULL, "heap bounds should look ok");
    assert(_heap_start < _heap_end, "heap bounds should look ok");
  
!   // Reset all the marking data structures and any necessary flags
!   reset_marking_state();
  
    if (verbose_low()) {
      gclog_or_tty->print_cr("[global] resetting");
    }
*** 764,773 ****
--- 765,791 ----
    // we need this to make sure that the flag is on during the evac
    // pause with initial mark piggy-backed
    set_concurrent_marking_in_progress();
  }
  
+ 
+ void ConcurrentMark::reset_marking_state(bool clear_overflow) {
+   _markStack.set_should_expand();
+   _markStack.setEmpty();        // Also clears the _markStack overflow flag
+   if (clear_overflow) {
+     clear_has_overflown();
+   } else {
+     assert(has_overflown(), "pre-condition");
+   }
+   _finger = _heap_start;
+ 
+   for (uint i = 0; i < _max_worker_id; ++i) {
+     CMTaskQueue* queue = _task_queues->queue(i);
+     queue->set_empty();
+   }
+ }
+ 
  void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
    assert(active_tasks <= _max_worker_id, "we should not have more");
  
    _active_tasks = active_tasks;
    // Need to update the three data structures below according to the
*** 794,804 ****
  }
  
  void ConcurrentMark::set_non_marking_state() {
    // We set the global marking state to some default values when we're
    // not doing marking.
!   clear_marking_state();
    _active_tasks = 0;
    clear_concurrent_marking_in_progress();
  }
  
  ConcurrentMark::~ConcurrentMark() {
--- 812,822 ----
  }
  
  void ConcurrentMark::set_non_marking_state() {
    // We set the global marking state to some default values when we're
    // not doing marking.
!   reset_marking_state();
    _active_tasks = 0;
    clear_concurrent_marking_in_progress();
  }
  
  ConcurrentMark::~ConcurrentMark() {
*** 961,971 ****
        // task 0 is responsible for clearing the global data structures
        // We should be here because of an overflow. During STW we should
        // not clear the overflow flag since we rely on it being true when
        // we exit this method to abort the pause and restart concurent
        // marking.
!       clear_marking_state(concurrent() /* clear_overflow */);
        force_overflow()->update();
  
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
--- 979,989 ----
        // task 0 is responsible for clearing the global data structures
        // We should be here because of an overflow. During STW we should
        // not clear the overflow flag since we rely on it being true when
        // we exit this method to abort the pause and restart concurent
        // marking.
!       reset_marking_state(concurrent() /* clear_overflow */);
        force_overflow()->update();
  
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
*** 1255,1266 ****
    weakRefsWork(clear_all_soft_refs);
  
    if (has_overflown()) {
      // Oops. We overflowed. Restart concurrent marking.
      _restart_for_overflow = true;
!     // Clear the flag. We do not need it any more.
!     clear_has_overflown();
      if (G1TraceMarkStackOverflow) {
        gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
      }
    } else {
      // Aggregate the per-task counting data that we have accumulated
--- 1273,1285 ----
    weakRefsWork(clear_all_soft_refs);
  
    if (has_overflown()) {
      // Oops. We overflowed. Restart concurrent marking.
      _restart_for_overflow = true;
!     // Clear the marking state because we will be restarting
!     // marking due to overflowing the global mark stack.
!     reset_marking_state();
      if (G1TraceMarkStackOverflow) {
        gclog_or_tty->print_cr("\nRemark led to restart for overflow.");
      }
    } else {
      // Aggregate the per-task counting data that we have accumulated
*** 1280,1301 ****
        Universe::heap()->prepare_for_verify();
        Universe::verify(/* silent */ false,
                         /* option */ VerifyOption_G1UseNextMarking);
      }
      assert(!restart_for_overflow(), "sanity");
    }
  
    // Expand the marking stack, if we have to and if we can.
    if (_markStack.should_expand()) {
      _markStack.expand();
    }
  
-   // Reset the marking state if marking completed
-   if (!restart_for_overflow()) {
-     set_non_marking_state();
-   }
- 
  #if VERIFY_OBJS_PROCESSED
    _scan_obj_cl.objs_processed = 0;
    ThreadLocalObjQueue::objs_enqueued = 0;
  #endif
--- 1299,1317 ----
        Universe::heap()->prepare_for_verify();
        Universe::verify(/* silent */ false,
                         /* option */ VerifyOption_G1UseNextMarking);
      }
      assert(!restart_for_overflow(), "sanity");
+     // Completely reset the marking state since marking completed
+     set_non_marking_state();
    }
  
    // Expand the marking stack, if we have to and if we can.
    if (_markStack.should_expand()) {
      _markStack.expand();
    }
  
  #if VERIFY_OBJS_PROCESSED
    _scan_obj_cl.objs_processed = 0;
    ThreadLocalObjQueue::objs_enqueued = 0;
  #endif
*** 2961,2986 ****
        }
      }
    }
  #endif // PRODUCT
  
- void ConcurrentMark::clear_marking_state(bool clear_overflow) {
-   _markStack.set_should_expand();
-   _markStack.setEmpty();        // Also clears the _markStack overflow flag
-   if (clear_overflow) {
-     clear_has_overflown();
-   } else {
-     assert(has_overflown(), "pre-condition");
-   }
-   _finger = _heap_start;
- 
-   for (uint i = 0; i < _max_worker_id; ++i) {
-     CMTaskQueue* queue = _task_queues->queue(i);
-     queue->set_empty();
-   }
- }
- 
  // Aggregate the counting data that was constructed concurrently
  // with marking.
  class AggregateCountDataHRClosure: public HeapRegionClosure {
    G1CollectedHeap* _g1h;
    ConcurrentMark* _cm;
--- 2977,2986 ----
*** 3183,3193 ****
    // Clear all marks to force marking thread to do nothing
    _nextMarkBitMap->clearAll();
    // Clear the liveness counting data
    clear_all_count_data();
    // Empty mark stack
!   clear_marking_state();
    for (uint i = 0; i < _max_worker_id; ++i) {
      _tasks[i]->clear_region_fields();
    }
    _has_aborted = true;
--- 3183,3193 ----
    // Clear all marks to force marking thread to do nothing
    _nextMarkBitMap->clearAll();
    // Clear the liveness counting data
    clear_all_count_data();
    // Empty mark stack
!   reset_marking_state();
    for (uint i = 0; i < _max_worker_id; ++i) {
      _tasks[i]->clear_region_fields();
    }
    _has_aborted = true;