src/share/vm/gc_implementation/g1/concurrentMark.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File
*** old/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jan 15 01:40:44 2014
--- new/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jan 15 01:40:43 2014
*** 904,914 ****
--- 904,914 ----
print_reachable("at-cycle-start",
VerifyOption_G1UsePrevMarking, true /* all */);
}
#endif
! // Initialise marking structures. This has to be done in a STW phase.
! // Initialize marking structures. This has to be done in a STW phase.
reset();
// For each region note start of marking.
NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl);
*** 918,929 ****
--- 918,929 ----
void ConcurrentMark::checkpointRootsInitialPost() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If we force an overflow during remark, the remark operation will
// actually abort and we'll restart concurrent marking. If we always
! // force an oveflow during remark we'll never actually complete the
! // marking phase. So, we initilize this here, at the start of the
! // force an overflow during remark we'll never actually complete the
! // marking phase. So, we initialize this here, at the start of the
// cycle, so that at the remaining overflow number will decrease at
// every remark and we'll eventually not need to cause one.
force_overflow_stw()->init();
// Start Concurrent Marking weak-reference discovery.
*** 954,964 ****
--- 954,964 ----
* sync up, whereas another one could be trying to yield, while also
* waiting for the other threads to sync up too.
*
* Note, however, that this code is also used during remark and in
* this case we should not attempt to leave / enter the STS, otherwise
! * we'll either hit an asseert (debug / fastdebug) or deadlock
! * we'll either hit an assert (debug / fastdebug) or deadlock
* (product). So we should only leave / enter the STS if we are
* operating concurrently.
*
* Because the thread that does the sync barrier has left the STS, it
* is possible to be suspended for a Full GC or an evacuation pause
*** 996,1006 ****
--- 996,1006 ----
// let the task associated with with worker 0 do this
if (worker_id == 0) {
// task 0 is responsible for clearing the global data structures
// We should be here because of an overflow. During STW we should
// not clear the overflow flag since we rely on it being true when
! // we exit this method to abort the pause and restart concurent
! // we exit this method to abort the pause and restart concurrent
// marking.
reset_marking_state(true /* clear_overflow */);
force_overflow()->update();
if (G1Log::fine()) {
*** 1246,1256 ****
--- 1246,1256 ----
set_concurrency_and_phase(active_workers, true /* concurrent */);
CMConcurrentMarkingTask markingTask(this, cmThread());
if (use_parallel_marking_threads()) {
_parallel_workers->set_active_workers((int)active_workers);
! // Don't set _n_par_threads because it affects MT in proceess_strong_roots()
! // Don't set _n_par_threads because it affects MT in process_strong_roots()
// and the decisions on that MT processing is made elsewhere.
assert(_parallel_workers->active_workers() > 0, "Should have been set");
_parallel_workers->run_task(&markingTask);
} else {
markingTask.work(0);
*** 1479,1489 ****
--- 1479,1489 ----
if (marked_bytes > 0) {
set_bit_for_region(hr);
}
// Set the marked bytes for the current region so that
! // it can be queried by a calling verificiation routine
! // it can be queried by a calling verification routine
_region_marked_bytes = marked_bytes;
return false;
}
*** 2302,2312 ****
--- 2302,2312 ----
// We call CMTask::do_marking_step() to completely drain the local
// and global marking stacks of entries pushed by the 'keep alive'
// oop closure (an instance of G1CMKeepAliveAndDrainClosure above).
//
// CMTask::do_marking_step() is called in a loop, which we'll exit
! // if there's nothing more to do (i.e. we've completely drained the
! // entries that were pushed as a a result of applying the 'keep alive'
! // if there's nothing more to do (i.e. we've completely drained the
! // entries that were pushed as a result of applying the 'keep alive'
// closure to the entries on the discovered ref lists) or we overflow
// the global marking stack.
//
// Note: CMTask::do_marking_step() can set the CMTask::has_aborted()
*** 2465,2475 ****
--- 2465,2475 ----
// threads involved in parallel reference processing as these
// instances are executed serially by the current thread (e.g.
// reference processing is not multi-threaded and is thus
// performed by the current thread instead of a gang worker).
//
! // The gang tasks involved in parallel reference procssing create
! // The gang tasks involved in parallel reference processing create
// their own instances of these closures, which do their own
// synchronization among themselves.
G1CMKeepAliveAndDrainClosure g1_keep_alive(this, task(0), true /* is_serial */);
G1CMDrainMarkingStackClosure g1_drain_mark_stack(this, task(0), true /* is_serial */);
*** 2543,2553 ****
--- 2543,2553 ----
ConcurrentMark* _cm;
bool _is_serial;
public:
void work(uint worker_id) {
// Since all available tasks are actually started, we should
! // only proceed if we're supposed to be actived.
! // only proceed if we're supposed to be active.
if (worker_id < _cm->active_tasks()) {
CMTask* task = _cm->task(worker_id);
task->record_start_time();
do {
task->do_marking_step(1000000000.0 /* something very large */,
*** 3063,3073 ****
--- 3063,3073 ----
return false;
}
// 'start' should be in the heap.
assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
! // 'end' *may* be just beyone the end of the heap (if hr is the last region)
! // 'end' *may* be just beyond the end of the heap (if hr is the last region)
assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
*** 4411,4421 ****
--- 4411,4421 ----
if (_cm->has_overflown()) {
// This is the interesting one. We aborted because a global
// overflow was raised. This means we have to restart the
// marking phase and start iterating over regions. However, in
// order to do this we have to make sure that all tasks stop
! // what they are doing and re-initialise in a safe manner. We
! // what they are doing and re-initialize in a safe manner. We
// will achieve this with the use of two barrier sync points.
if (_cm->verbose_low()) {
gclog_or_tty->print_cr("[%u] detected overflow", _worker_id);
}
*** 4425,4435 ****
--- 4425,4435 ----
// from a parallel context
_cm->enter_first_sync_barrier(_worker_id);
// When we exit this sync barrier we know that all tasks have
// stopped doing marking work. So, it's now safe to
! // re-initialise our data structures. At the end of this method,
! // re-initialize our data structures. At the end of this method,
// task 0 will clear the global data structures.
}
statsOnly( ++_aborted_overflow );
src/share/vm/gc_implementation/g1/concurrentMark.cpp
Index
Unified diffs
Context diffs
Sdiffs
Patch
New
Old
Previous File
Next File