
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 60060 : [mq]: 8210462-lkorinth-review

*** 1804,1814 ****
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
! //   * Reference discovery is enabled at initial marking.
  //   * Reference discovery is disabled and the discovered
  //     references processed etc during remarking.
  //   * Reference discovery is MT (see below).
  //   * Reference discovery requires a barrier (see below).
  //   * Reference processing may or may not be MT
--- 1804,1814 ----
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
! //   * Reference discovery is enabled at concurrent start.
  //   * Reference discovery is disabled and the discovered
  //     references processed etc during remarking.
  //   * Reference discovery is MT (see below).
  //   * Reference discovery requires a barrier (see below).
  //   * Reference processing may or may not be MT
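Reviewer aside (not part of this webrev): the comment above describes a discovery window for the concurrent marking ref processor that opens at concurrent start and closes at remark. A minimal stand-alone sketch of that window, using hypothetical names rather than the real G1 reference processor API, could look like this:

  // Illustrative sketch only -- hypothetical names, not the G1 sources.
  #include <cstdio>

  struct MarkingRefProcessor {
    bool discovery_enabled = false;

    void enable_discovery()  { discovery_enabled = true;  }  // done at concurrent start
    void disable_discovery() { discovery_enabled = false; }  // done at remark, after processing
  };

  int main() {
    MarkingRefProcessor rp;
    rp.enable_discovery();             // the concurrent start pause opens the discovery window
    printf("discovery enabled: %d\n", rp.discovery_enabled);
    rp.disable_discovery();            // remark processes discovered refs and closes the window
    printf("discovery enabled: %d\n", rp.discovery_enabled);
    return 0;
  }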
*** 2045,2055 ****
  assert_heap_not_locked();
  assert(should_do_concurrent_full_gc(cause),
         "Non-concurrent cause %s", GCCause::to_string(cause));

  for (uint i = 1; true; ++i) {
!   // Try to schedule an initial-mark evacuation pause that will
    // start a concurrent cycle.
    LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
    VM_G1TryInitiateConcMark op(gc_counter,
                                cause,
                                policy()->max_pause_time_ms());
--- 2045,2055 ----
  assert_heap_not_locked();
  assert(should_do_concurrent_full_gc(cause),
         "Non-concurrent cause %s", GCCause::to_string(cause));

  for (uint i = 1; true; ++i) {
!   // Try to schedule a concurrent start evacuation pause that will
    // start a concurrent cycle.
    LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
    VM_G1TryInitiateConcMark op(gc_counter,
                                cause,
                                policy()->max_pause_time_ms());
*** 2114,2124 ****
  // Cases (4) and (5) are detected together by a change to
  // _old_marking_cycles_started.
  //
  // Note that (1) does not imply (4). If we're still in the mixed
  // phase of an earlier concurrent collection, the request to make the
! // collection an initial-mark won't be honored. If we don't check for
  // both conditions we'll spin doing back-to-back collections.
  if (op.gc_succeeded() ||
      op.cycle_already_in_progress() ||
      op.whitebox_attached() ||
      (old_marking_started_before != old_marking_started_after)) {
--- 2114,2124 ----
  // Cases (4) and (5) are detected together by a change to
  // _old_marking_cycles_started.
  //
  // Note that (1) does not imply (4). If we're still in the mixed
  // phase of an earlier concurrent collection, the request to make the
! // collection a concurrent start won't be honored. If we don't check for
  // both conditions we'll spin doing back-to-back collections.
  if (op.gc_succeeded() ||
      op.cycle_already_in_progress() ||
      op.whitebox_attached() ||
      (old_marking_started_before != old_marking_started_after)) {
*** 2618,2628 ****
  // This summary needs to be printed before incrementing total collections.
  rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());

  // Update common counters.
  increment_total_collections(full /* full gc */);
! if (full || collector_state()->in_initial_mark_gc()) {
    increment_old_marking_cycles_started();
  }

  // Fill TLAB's and such
  {
--- 2618,2628 ----
  // This summary needs to be printed before incrementing total collections.
  rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections());

  // Update common counters.
  increment_total_collections(full /* full gc */);
! if (full || collector_state()->in_concurrent_start_gc()) {
    increment_old_marking_cycles_started();
  }

  // Fill TLAB's and such
  {
*** 2849,2859 ****
    _collection_set.iterate_optional(&cl);
  }
}

G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const {
!   if (collector_state()->in_initial_mark_gc()) {
    return G1HeapVerifier::G1VerifyConcurrentStart;
  } else if (collector_state()->in_young_only_phase()) {
    return G1HeapVerifier::G1VerifyYoungNormal;
  } else {
    return G1HeapVerifier::G1VerifyMixed;
--- 2849,2859 ----
    _collection_set.iterate_optional(&cl);
  }
}

G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const {
!   if (collector_state()->in_concurrent_start_gc()) {
    return G1HeapVerifier::G1VerifyConcurrentStart;
  } else if (collector_state()->in_young_only_phase()) {
    return G1HeapVerifier::G1VerifyYoungNormal;
  } else {
    return G1HeapVerifier::G1VerifyMixed;
*** 2894,2904 ****
    phase_times()->record_expand_heap_time(expand_ms);
  }
}

const char* G1CollectedHeap::young_gc_name() const {
!   if (collector_state()->in_initial_mark_gc()) {
    return "Pause Young (Concurrent Start)";
  } else if (collector_state()->in_young_only_phase()) {
    if (collector_state()->in_young_gc_before_mixed()) {
      return "Pause Young (Prepare Mixed)";
    } else {
--- 2894,2904 ----
    phase_times()->record_expand_heap_time(expand_ms);
  }
}

const char* G1CollectedHeap::young_gc_name() const {
!   if (collector_state()->in_concurrent_start_gc()) {
    return "Pause Young (Concurrent Start)";
  } else if (collector_state()->in_young_only_phase()) {
    if (collector_state()->in_young_gc_before_mixed()) {
      return "Pause Young (Prepare Mixed)";
    } else {
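Reviewer aside (not part of this webrev): the renamed predicate drives the pause name that shows up in the GC log. A minimal, self-contained sketch of that mapping, with stand-in types rather than the real collector state, is shown below; the branches elided in this hunk return the usual "(Normal)" and "(Mixed)" names.

  // Illustrative sketch only -- stand-in types, not the G1 sources.
  #include <cstdio>

  enum class YoungGCKind { ConcurrentStart, BeforeMixed, Normal, Mixed };

  // Mirrors the selection order in young_gc_name(): the concurrent start
  // check comes first, then the young-only cases, then mixed.
  static const char* young_gc_name(YoungGCKind kind) {
    switch (kind) {
      case YoungGCKind::ConcurrentStart: return "Pause Young (Concurrent Start)";
      case YoungGCKind::BeforeMixed:     return "Pause Young (Prepare Mixed)";
      case YoungGCKind::Normal:          return "Pause Young (Normal)";
      case YoungGCKind::Mixed:           return "Pause Young (Mixed)";
    }
    return "Pause Young (Unknown)";
  }

  int main() {
    printf("%s\n", young_gc_name(YoungGCKind::ConcurrentStart));
    return 0;
  }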
*** 2947,2974 ****
  trace_heap_before_gc(_gc_tracer_stw);

  _verifier->verify_region_sets_optional();
  _verifier->verify_dirty_young_regions();

! // We should not be doing initial mark unless the conc mark thread is running
  if (!_cm_thread->should_terminate()) {
!   // This call will decide whether this pause is an initial-mark
!   // pause. If it is, in_initial_mark_gc() will return true
    // for the duration of this pause.
    policy()->decide_on_conc_mark_initiation();
  }

! // We do not allow initial-mark to be piggy-backed on a mixed GC.
! assert(!collector_state()->in_initial_mark_gc() ||
         collector_state()->in_young_only_phase(), "sanity");

  // We also do not allow mixed GCs during marking.
  assert(!collector_state()->mark_or_rebuild_in_progress() ||
         collector_state()->in_young_only_phase(), "sanity");

! // Record whether this pause is an initial mark. When the current
  // thread has completed its logging output and it's safe to signal
  // the CM thread, the flag's value in the policy has been reset.
! bool should_start_conc_mark = collector_state()->in_initial_mark_gc();
  if (should_start_conc_mark) {
    _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
  }

  // Inner scope for scope based logging, timers, and stats collection
--- 2947,2974 ----
  trace_heap_before_gc(_gc_tracer_stw);

  _verifier->verify_region_sets_optional();
  _verifier->verify_dirty_young_regions();

! // We should not be doing concurrent start unless the concurrent mark thread is running
  if (!_cm_thread->should_terminate()) {
!   // This call will decide whether this pause is a concurrent start
!   // pause. If it is, in_concurrent_start_gc() will return true
    // for the duration of this pause.
    policy()->decide_on_conc_mark_initiation();
  }

! // We do not allow concurrent start to be piggy-backed on a mixed GC.
! assert(!collector_state()->in_concurrent_start_gc() ||
         collector_state()->in_young_only_phase(), "sanity");

  // We also do not allow mixed GCs during marking.
  assert(!collector_state()->mark_or_rebuild_in_progress() ||
         collector_state()->in_young_only_phase(), "sanity");

! // Record whether this pause is a concurrent start. When the current
  // thread has completed its logging output and it's safe to signal
  // the CM thread, the flag's value in the policy has been reset.
! bool should_start_conc_mark = collector_state()->in_concurrent_start_gc();
  if (should_start_conc_mark) {
    _cm->gc_tracer_cm()->set_gc_cause(gc_cause());
  }

  // Inner scope for scope based logging, timers, and stats collection
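Reviewer aside (not part of this webrev): the comment above explains why the pause snapshots the policy flag into should_start_conc_mark: the flag in the policy may already be reset by the time the current thread finishes its logging and signals the CM thread. A minimal sketch of that snapshot pattern, with hypothetical names, follows.

  // Illustrative sketch only -- hypothetical names, not the G1 sources.
  #include <cstdio>

  struct PolicyState {
    bool in_concurrent_start_gc = true;   // decided for this pause
  };

  int main() {
    PolicyState state;

    // Snapshot the decision up front; later code must use the local copy,
    // because the flag in the policy can be reset before logging finishes.
    bool should_start_conc_mark = state.in_concurrent_start_gc;

    state.in_concurrent_start_gc = false; // reset happens during the pause

    if (should_start_conc_mark) {
      printf("signal concurrent mark thread after logging completes\n");
    }
    return 0;
  }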
*** 3048,3058 ****
    if (should_start_conc_mark) {
      // We have to do this before we notify the CM threads that
      // they can start working to make sure that all the
      // appropriate initialization is done on the CM object.
!     concurrent_mark()->post_initial_mark();
      // Note that we don't actually trigger the CM thread at
      // this point. We do that later when we're sure that
      // the current thread has completed its logging output.
    }
--- 3048,3058 ----
    if (should_start_conc_mark) {
      // We have to do this before we notify the CM threads that
      // they can start working to make sure that all the
      // appropriate initialization is done on the CM object.
!     concurrent_mark()->post_concurrent_start();
      // Note that we don't actually trigger the CM thread at
      // this point. We do that later when we're sure that
      // the current thread has completed its logging output.
    }
*** 3529,3539 ****
  double ref_proc_time = os::elapsedTime() - ref_proc_start;
  phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}

void G1CollectedHeap::make_pending_list_reachable() {
!   if (collector_state()->in_initial_mark_gc()) {
    oop pll_head = Universe::reference_pending_list();
    if (pll_head != NULL) {
      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
      _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
    }
--- 3529,3539 ----
  double ref_proc_time = os::elapsedTime() - ref_proc_start;
  phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}

void G1CollectedHeap::make_pending_list_reachable() {
!   if (collector_state()->in_concurrent_start_gc()) {
    oop pll_head = Universe::reference_pending_list();
    if (pll_head != NULL) {
      // Any valid worker id is fine here as we are in the VM thread and single-threaded.
      _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
    }
*** 3718,3730 ****
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

! // InitialMark needs claim bits to keep track of the marked-through CLDs.
! if (collector_state()->in_initial_mark_gc()) {
!   concurrent_mark()->pre_initial_mark();

    double start_clear_claimed_marks = os::elapsedTime();

    ClassLoaderDataGraph::clear_claimed_marks();
--- 3718,3730 ----
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

! // Concurrent start needs claim bits to keep track of the marked-through CLDs.
! if (collector_state()->in_concurrent_start_gc()) {
!   concurrent_mark()->pre_concurrent_start();

    double start_clear_claimed_marks = os::elapsedTime();

    ClassLoaderDataGraph::clear_claimed_marks();
*** 4788,4798 ****
  } else {
    assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
    _survivor.add_used_bytes(allocated_bytes);
  }

! bool const during_im = collector_state()->in_initial_mark_gc();
  if (during_im && allocated_bytes > 0) {
    _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
  }
  _hr_printer.retire(alloc_region);
}
--- 4788,4798 ----
  } else {
    assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type());
    _survivor.add_used_bytes(allocated_bytes);
  }

! bool const during_im = collector_state()->in_concurrent_start_gc();
  if (during_im && allocated_bytes > 0) {
    _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
  }
  _hr_printer.retire(alloc_region);
}