src/share/vm/gc/g1/g1ConcurrentMark.cpp
*** 412,422 ****
// _active_tasks set in set_non_marking_state
// _tasks set inside the constructor
_task_queues(new G1CMTaskQueueSet((int) _max_worker_id)),
_terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
- _has_overflown(false),
_concurrent(false),
_has_aborted(false),
_restart_for_overflow(false),
_concurrent_marking_in_progress(false),
_gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
--- 412,421 ----
*** 590,607 ****
// pause with initial mark piggy-backed
set_concurrent_marking_in_progress();
}
! void G1ConcurrentMark::reset_marking_state(bool clear_overflow) {
_global_mark_stack.set_should_expand(has_overflown());
_global_mark_stack.set_empty(); // Also clears the overflow stack's overflow flag
- if (clear_overflow) {
- clear_has_overflown();
- } else {
- assert(has_overflown(), "pre-condition");
- }
_finger = _heap_start;
for (uint i = 0; i < _max_worker_id; ++i) {
G1CMTaskQueue* queue = _task_queues->queue(i);
queue->set_empty();
--- 589,601 ----
// pause with initial mark piggy-backed
set_concurrent_marking_in_progress();
}
! void G1ConcurrentMark::reset_marking_state() {
_global_mark_stack.set_should_expand(has_overflown());
_global_mark_stack.set_empty(); // Also clears the overflow stack's overflow flag
_finger = _heap_start;
for (uint i = 0; i < _max_worker_id; ++i) {
G1CMTaskQueue* queue = _task_queues->queue(i);
queue->set_empty();
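
The new reset_marking_state() above no longer takes ownership of the overflow flag: it records whether an overflow happened (so the global mark stack can be expanded before it is used again), empties the stack and the per-worker task queues, resets the finger, and leaves _has_overflown for the callers to deal with. A minimal standalone sketch of that split -- invented MiniMarkStack/MiniMarker names, not HotSpot types -- assuming the flag is now cleared by whoever starts the next marking cycle:

#include <cstddef>
#include <vector>

// Invented stand-in for the global mark stack: fixed capacity plus an
// "expand before next use" hint, nothing more.
struct MiniMarkStack {
  std::vector<const void*> entries;
  size_t capacity      = 4;
  bool   should_expand = false;

  void set_empty() { entries.clear(); }
};

// Invented stand-in for G1ConcurrentMark, reduced to the reset path.
struct MiniMarker {
  MiniMarkStack global_stack;
  bool has_overflown = false;

  // Mirrors the parameterless reset_marking_state(): remember the overflow
  // so the stack can be expanded, empty the shared structures, but leave
  // has_overflown untouched -- clearing it is no longer this function's job.
  void reset_marking_state() {
    global_stack.should_expand = has_overflown;
    global_stack.set_empty();
    // (the finger and the per-worker task queues would be reset here,
    //  as in the hunk above)
  }
};
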
*** 881,891 ****
// task 0 is responsible for clearing the global data structures
// We should be here because of an overflow. During STW we should
// not clear the overflow flag since we rely on it being true when
// we exit this method to abort the pause and restart concurrent
// marking.
! reset_marking_state(true /* clear_overflow */);
log_info(gc, marking)("Concurrent Mark reset for overflow");
}
}
--- 875,885 ----
// task 0 is responsible for clearing the global data structures
// We should be here because of an overflow. During STW we should
// not clear the overflow flag since we rely on it being true when
// we exit this method to abort the pause and restart concurrent
// marking.
! reset_marking_state();
log_info(gc, marking)("Concurrent Mark reset for overflow");
}
}
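
The comment in this hunk is easier to follow once the remark-pause side is spelled out: because reset_marking_state() leaves the overflow flag alone, the code that runs after the remark work can still see has_overflown, reset the marking state, and schedule a restart of concurrent marking instead of treating the cycle as complete. A hypothetical, single-threaded condensation of that flow (invented names, not the real G1 control flow):

// Invented illustration of why the overflow flag must survive the reset
// during the STW remark: it is the signal that turns the pause into
// "abort and restart concurrent marking".
struct MiniRemark {
  bool has_overflown        = false;   // set when a mark-stack push failed
  bool restart_for_overflow = false;

  void reset_marking_state() {
    // Empties stacks and queues; deliberately keeps has_overflown set
    // (see the sketch after the previous hunk).
  }

  void after_remark_work() {
    if (has_overflown) {
      // Only observable here because the reset did not clear the flag.
      reset_marking_state();
      restart_for_overflow = true;   // pause aborted, marking restarts
    }
  }
};
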
*** 1750,1765 ****
// global marking stack.
assert(_global_mark_stack.is_out_of_memory() || _global_mark_stack.is_empty(),
"Mark stack should be empty (unless it is out of memory)");
- if (_global_mark_stack.is_out_of_memory()) {
- // This should have been done already when we tried to push an
- // entry on to the global mark stack. But let's do it again.
- set_has_overflown();
- }
-
assert(rp->num_q() == active_workers, "why not");
rp->enqueue_discovered_references(executor);
rp->verify_no_references_recorded();
--- 1744,1753 ----
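
The block removed in this hunk re-set the overflow flag when the global mark stack had run out of memory, even though -- as its own comment notes -- the flag is already raised at the moment a push fails. A small invented model of that invariant, which is what makes the later re-set redundant and lets the surviving assert stand in for it:

#include <cassert>
#include <cstddef>
#include <vector>

// Invented model: a push either succeeds or records out-of-memory AND
// raises the overflow flag right away, at the point of failure.
struct MiniStack {
  std::vector<const void*> entries;
  size_t capacity      = 4;
  bool   out_of_memory = false;

  bool push(const void* e, bool* has_overflown) {
    if (entries.size() >= capacity) {
      out_of_memory  = true;
      *has_overflown = true;     // nothing left for later code to do
      return false;
    }
    entries.push_back(e);
    return true;
  }
  bool is_empty() const { return entries.empty(); }
};

int main() {
  MiniStack stack;
  bool has_overflown = false;
  int dummy = 0;
  for (int i = 0; i < 10; i++) {
    stack.push(&dummy, &has_overflown);
  }
  // The invariants the assert in the hunk relies on: the stack is either
  // drained or out of memory, and out-of-memory implies the overflow flag
  // was already set when the push failed.
  assert(stack.out_of_memory || stack.is_empty());
  assert(!stack.out_of_memory || has_overflown);
  return 0;
}
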
*** 2929,2939 ****
// which one.
guarantee(_cm->out_of_regions(), "only way to reach here");
guarantee(_cm->mark_stack_empty(), "only way to reach here");
guarantee(_task_queue->size() == 0, "only way to reach here");
guarantee(!_cm->has_overflown(), "only way to reach here");
- guarantee(!_cm->mark_stack_overflow(), "only way to reach here");
} else {
// Apparently there's more work to do. Let's abort this task. It
// will restart it and we can hopefully find more things to do.
set_has_aborted();
}
--- 2917,2926 ----
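
For context on the guarantees that remain: they only verify what the termination protocol has already established -- a task that believes marking is finished must have no regions left, no global-stack entries, no local-queue entries, and no overflow; anything else means the task aborts so the caller can re-run it and look for more work. A toy version of that consistency check (invented names, not the real do_marking_step logic):

#include <cassert>
#include <cstddef>

// Invented condensation of the check above: either every source of work is
// provably exhausted, or the task aborts and is restarted later.
struct MiniTask {
  bool   out_of_regions     = true;
  bool   global_stack_empty = true;
  size_t local_queue_size   = 0;
  bool   has_overflown      = false;
  bool   has_aborted        = false;

  void verify_or_abort(bool termination_says_done) {
    if (termination_says_done) {
      // Sanity checks only; the termination protocol made the decision.
      assert(out_of_regions && global_stack_empty &&
             local_queue_size == 0 && !has_overflown);
    } else {
      has_aborted = true;   // caller restarts the task
    }
  }
};
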