< prev index next >
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
Print this page
rev 60594 : [mq]: 8240556-abort-conc-mark-new
@@ -2717,14 +2717,16 @@
assert_heap_not_locked();
return result;
}
-void G1CollectedHeap::do_concurrent_mark() {
+void G1CollectedHeap::start_concurrent_cycle(bool is_mark_cycle) {
+ _cm->post_concurrent_start(is_mark_cycle);
+
MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
if (!_cm_thread->in_progress()) {
- _cm_thread->set_started();
+ _cm_thread->set_started(is_mark_cycle);
CGC_lock->notify();
}
}
bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
@@ -2975,17 +2977,15 @@
assert(!collector_state()->in_concurrent_start_gc() ||
collector_state()->in_young_only_phase(), "sanity");
// We also do not allow mixed GCs during marking.
assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity");
- // Record whether this pause is a concurrent start. When the current
- // thread has completed its logging output and it's safe to signal
- // the CM thread, the flag's value in the policy has been reset.
- bool should_start_conc_mark = collector_state()->in_concurrent_start_gc();
- if (should_start_conc_mark) {
- _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
- }
+ // Record whether this pause may need to trigger a concurrent operation. Later,
+ // when we signal the G1ConcurrentMarkThread, the collector state has already
+ // been reset for the next pause.
+ bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();
+ bool concurrent_operation_is_mark_cycle = false;
// Inner scope for scope based logging, timers, and stats collection
{
G1EvacuationInfo evacuation_info;
@@ -3058,29 +3058,25 @@
start_new_collection_set();
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
- if (should_start_conc_mark) {
- // We have to do this before we notify the CM threads that
- // they can start working to make sure that all the
- // appropriate initialization is done on the CM object.
- concurrent_mark()->post_concurrent_start();
- // Note that we don't actually trigger the CM thread at
- // this point. We do that later when we're sure that
- // the current thread has completed its logging output.
- }
+ // Refine the type of a concurrent mark operation now that we did the
+ // evacuation, possibly aborting the marking part of it.
+ concurrent_operation_is_mark_cycle =
+ should_start_concurrent_mark_operation &&
+ ((gc_cause() != GCCause::_g1_humongous_allocation) || policy()->need_to_start_conc_mark("Revise"));
allocate_dummy_regions();
_allocator->init_mutator_alloc_regions();
expand_heap_after_young_collection();
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
- policy()->record_collection_pause_end(pause_time_ms);
+ policy()->record_collection_pause_end(pause_time_ms, concurrent_operation_is_mark_cycle);
}
verify_after_young_collection(verify_type);
gc_epilogue(false);
@@ -3117,17 +3113,17 @@
}
// It should now be safe to tell the concurrent mark thread to start
// without its logging output interfering with the logging output
// that came from the pause.
- if (should_start_conc_mark) {
- // CAUTION: after the doConcurrentMark() call below, the concurrent marking
+ if (should_start_concurrent_mark_operation) {
+ // CAUTION: after the start_concurrent_cycle() call below, the concurrent marking
// thread(s) could be running concurrently with us. Make sure that anything
// after this point does not assume that we are the only GC thread running.
// Note: of course, the actual marking work will not start until the safepoint
// itself is released in SuspendibleThreadSet::desynchronize().
- do_concurrent_mark();
+ start_concurrent_cycle(concurrent_operation_is_mark_cycle /* is_mark_cycle */);
ConcurrentGCBreakpoints::notify_idle_to_active();
}
}
void G1CollectedHeap::remove_self_forwarding_pointers(G1RedirtyCardsQueueSet* rdcqs) {
@@ -3733,11 +3729,11 @@
DerivedPointerTable::clear();
#endif
// Concurrent start needs claim bits to keep track of the marked-through CLDs.
if (collector_state()->in_concurrent_start_gc()) {
- concurrent_mark()->pre_concurrent_start();
+ concurrent_mark()->pre_concurrent_start(gc_cause());
double start_clear_claimed_marks = os::elapsedTime();
ClassLoaderDataGraph::clear_claimed_marks();
< prev index next >