src/share/vm/gc/g1/concurrentMark.cpp
*** 39,48 ****
--- 39,49 ----
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/suspendibleThreadSet.hpp"
+ #include "gc/shared/gcId.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
*** 518,528 ****
    _terminator(ParallelTaskTerminator((int) _max_worker_id, _task_queues)),
    _has_overflown(false),
    _concurrent(false),
    _has_aborted(false),
-   _aborted_gc_id(GCId::undefined()),
    _restart_for_overflow(false),
    _concurrent_marking_in_progress(false),
    // _verbose_level set below
--- 519,528 ----
*** 989,999 ****
        // marking.
        reset_marking_state(true /* clear_overflow */);
        force_overflow()->update();
        if (G1Log::fine()) {
!         gclog_or_tty->gclog_stamp(concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
        }
      }
    }
--- 989,999 ----
        // marking.
        reset_marking_state(true /* clear_overflow */);
        force_overflow()->update();
        if (G1Log::fine()) {
!         gclog_or_tty->gclog_stamp();
          gclog_or_tty->print_cr("[GC concurrent-mark-reset-for-overflow]");
        }
      }
    }
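The hunk above is the shape of most of this change: gclog_stamp() loses its GCId argument. A hedged sketch of how a stamp helper can pick the id up implicitly (continuing the hypothetical CurrentGCId above; the real gclog_stamp body is not part of this diff):

    #include <cstdio>
    #include <ctime>

    // Hypothetical stamp: emits a timestamp plus the ambient GC id, so
    // callers need no id parameter.
    void gclog_stamp_sketch() {
      std::time_t now = std::time(nullptr);
      char buf[32];
      std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", std::localtime(&now));
      std::printf("%s #%u: ", buf, static_cast<unsigned>(CurrentGCId::current()));
    }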
*** 1179,1189 ****
    // scan_in_progress() will have been set to true only if there was
    // at least one root region to scan. So, if it's false, we
    // should not attempt to do any further work.
    if (root_regions()->scan_in_progress()) {
      if (G1Log::fine()) {
!       gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
      }
      _parallel_marking_threads = calc_parallel_marking_threads();
      assert(parallel_marking_threads() <= max_parallel_marking_threads(),
--- 1179,1189 ----
    // scan_in_progress() will have been set to true only if there was
    // at least one root region to scan. So, if it's false, we
    // should not attempt to do any further work.
    if (root_regions()->scan_in_progress()) {
      if (G1Log::fine()) {
!       gclog_or_tty->gclog_stamp();
        gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
      }
      _parallel_marking_threads = calc_parallel_marking_threads();
      assert(parallel_marking_threads() <= max_parallel_marking_threads(),
*** 1193,1203 ****
      CMRootRegionScanTask task(this);
      _parallel_workers->set_active_workers(active_workers);
      _parallel_workers->run_task(&task);
      if (G1Log::fine()) {
!       gclog_or_tty->gclog_stamp(concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
      }
      // It's possible that has_aborted() is true here without actually
      // aborting the survivor scan earlier. This is OK as it's
--- 1193,1203 ----
      CMRootRegionScanTask task(this);
      _parallel_workers->set_active_workers(active_workers);
      _parallel_workers->run_task(&task);
      if (G1Log::fine()) {
!       gclog_or_tty->gclog_stamp();
        gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]", os::elapsedTime() - scan_start);
      }
      // It's possible that has_aborted() is true here without actually
      // aborting the survivor scan earlier. This is OK as it's
*** 1243,1254 ****
      return doit;
    }
   public:
    G1CMTraceTime(const char* title, bool doit)
!     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm(),
!                   G1CollectedHeap::heap()->concurrent_mark()->concurrent_gc_id()) {
    }
  };
  void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
    // world is stopped at this checkpoint
--- 1243,1253 ----
      return doit;
    }
   public:
    G1CMTraceTime(const char* title, bool doit)
!     : GCTraceTime(title, doit_and_prepend(doit), false, G1CollectedHeap::heap()->gc_timer_cm()) {
    }
  };
  void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
    // world is stopped at this checkpoint
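After this hunk the wrapper only prepends a space and forwards to GCTraceTime; the base constructor no longer takes a GC id. An illustrative use (the title string and placement are assumptions, not taken from this hunk):

    // Scoped timing/tracing of a marking phase; timer and GC id are implicit.
    {
      G1CMTraceTime trace("Finalize Marking", G1Log::fine());
      // ... phase work measured by the scoped tracer ...
    }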
*** 2389,2400 ****
      const ReferenceProcessorStats& stats =
          rp->process_discovered_references(&g1_is_alive,
                                            &g1_keep_alive,
                                            &g1_drain_mark_stack,
                                            executor,
!                                           g1h->gc_timer_cm(),
!                                           concurrent_gc_id());
      g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
      // The do_oop work routines of the keep_alive and drain_marking_stack
      // oop closures will set the has_overflown flag if we overflow the
      // global marking stack.
--- 2388,2398 ----
      const ReferenceProcessorStats& stats =
          rp->process_discovered_references(&g1_is_alive,
                                            &g1_keep_alive,
                                            &g1_drain_mark_stack,
                                            executor,
!                                           g1h->gc_timer_cm());
      g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
      // The do_oop work routines of the keep_alive and drain_marking_stack
      // oop closures will set the has_overflown flag if we overflow the
      // global marking stack.
*** 2968,2978 ****
  void ConcurrentMark::abort() {
    if (!cmThread()->during_cycle() || _has_aborted) {
      // We haven't started a concurrent cycle or we have already aborted it. No need to do anything.
      return;
    }
-
    // Clear all marks in the next bitmap for the next marking cycle. This will allow us to skip the next
    // concurrent bitmap clearing.
    _nextMarkBitMap->clearAll();
    // Note we cannot clear the previous marking bitmap here
--- 2966,2975 ----
*** 2986,2997 ****
    for (uint i = 0; i < _max_worker_id; ++i) {
      _tasks[i]->clear_region_fields();
    }
    _first_overflow_barrier_sync.abort();
    _second_overflow_barrier_sync.abort();
-   _aborted_gc_id = _g1h->gc_tracer_cm()->gc_id();
-   assert(!_aborted_gc_id.is_undefined(), "ConcurrentMark::abort() executed more than once?");
    _has_aborted = true;
    SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
    satb_mq_set.abandon_partial_marking();
    // This can be called either during or outside marking, we'll read
--- 2983,2992 ----
*** 3002,3018 ****
    _g1h->trace_heap_after_concurrent_cycle();
    _g1h->register_concurrent_cycle_end();
  }
- const GCId& ConcurrentMark::concurrent_gc_id() {
-   if (has_aborted()) {
-     return _aborted_gc_id;
-   }
-   return _g1h->gc_tracer_cm()->gc_id();
- }
-
  static void print_ms_time_info(const char* prefix, const char* name,
                                 NumberSeq& ns) {
    gclog_or_tty->print_cr("%s%5d %12s: total time = %8.2f s (avg = %8.2f ms).",
                           prefix, ns.num(), name, ns.sum()/1000.0, ns.avg());
    if (ns.num() > 0) {
--- 2997,3006 ----
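The two deletions above close the loop: abort() used to snapshot the tracer's id into _aborted_gc_id because later log lines could outlive the cycle that produced the id, and concurrent_gc_id() arbitrated between the live and cached values. With an ambient id scoped to the whole cycle, both become dead code. A usage sketch continuing the hypothetical CurrentGCId above:

    // The cycle driver establishes the id once; abort paths and late log
    // lines read the same ambient value, so no cached _aborted_gc_id field
    // is needed.
    void run_concurrent_cycle_sketch(uint32_t id) {
      CurrentGCId::Mark mark(id);  // valid for the whole scope, abort included
      // ... concurrent marking; any logging may call CurrentGCId::current() ...
    }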