< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 59991 : imported patch 8210462-fix-remaining-mentions-of-im

*** 1804,1814 **** // (both full and incremental). // * Both ref processors need to 'span' the entire heap as // the regions in the collection set may be dotted around. // // * For the concurrent marking ref processor: ! // * Reference discovery is enabled at initial marking. // * Reference discovery is disabled and the discovered // references processed etc during remarking. // * Reference discovery is MT (see below). // * Reference discovery requires a barrier (see below). // * Reference processing may or may not be MT --- 1804,1814 ---- // (both full and incremental). // * Both ref processors need to 'span' the entire heap as // the regions in the collection set may be dotted around. // // * For the concurrent marking ref processor: ! // * Reference discovery is enabled at concurrent start. // * Reference discovery is disabled and the discovered // references processed etc during remarking. // * Reference discovery is MT (see below). // * Reference discovery requires a barrier (see below). // * Reference processing may or may not be MT
*** 2612,2622 **** // This summary needs to be printed before incrementing total collections. rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections()); // Update common counters. increment_total_collections(full /* full gc */); ! if (full || collector_state()->in_initial_mark_gc()) { increment_old_marking_cycles_started(); } // Fill TLAB's and such { --- 2612,2622 ---- // This summary needs to be printed before incrementing total collections. rem_set()->print_periodic_summary_info("Before GC RS summary", total_collections()); // Update common counters. increment_total_collections(full /* full gc */); ! if (full || collector_state()->in_concurrent_start_gc()) { increment_old_marking_cycles_started(); } // Fill TLAB's and such {
*** 2843,2853 **** _collection_set.iterate_optional(&cl); } } G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const { ! if (collector_state()->in_initial_mark_gc()) { return G1HeapVerifier::G1VerifyConcurrentStart; } else if (collector_state()->in_young_only_phase()) { return G1HeapVerifier::G1VerifyYoungNormal; } else { return G1HeapVerifier::G1VerifyMixed; --- 2843,2853 ---- _collection_set.iterate_optional(&cl); } } G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const { ! if (collector_state()->in_concurrent_start_gc()) { return G1HeapVerifier::G1VerifyConcurrentStart; } else if (collector_state()->in_young_only_phase()) { return G1HeapVerifier::G1VerifyYoungNormal; } else { return G1HeapVerifier::G1VerifyMixed;
*** 2888,2898 **** phase_times()->record_expand_heap_time(expand_ms); } } const char* G1CollectedHeap::young_gc_name() const { ! if (collector_state()->in_initial_mark_gc()) { return "Pause Young (Concurrent Start)"; } else if (collector_state()->in_young_only_phase()) { if (collector_state()->in_young_gc_before_mixed()) { return "Pause Young (Prepare Mixed)"; } else { --- 2888,2898 ---- phase_times()->record_expand_heap_time(expand_ms); } } const char* G1CollectedHeap::young_gc_name() const { ! if (collector_state()->in_concurrent_start_gc()) { return "Pause Young (Concurrent Start)"; } else if (collector_state()->in_young_only_phase()) { if (collector_state()->in_young_gc_before_mixed()) { return "Pause Young (Prepare Mixed)"; } else {
*** 2941,2968 **** trace_heap_before_gc(_gc_tracer_stw); _verifier->verify_region_sets_optional(); _verifier->verify_dirty_young_regions(); ! // We should not be doing initial mark unless the conc mark thread is running if (!_cm_thread->should_terminate()) { ! // This call will decide whether this pause is an initial-mark ! // pause. If it is, in_initial_mark_gc() will return true // for the duration of this pause. policy()->decide_on_conc_mark_initiation(); } // We do not allow initial-mark to be piggy-backed on a mixed GC. ! assert(!collector_state()->in_initial_mark_gc() || collector_state()->in_young_only_phase(), "sanity"); // We also do not allow mixed GCs during marking. assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity"); ! // Record whether this pause is an initial mark. When the current // thread has completed its logging output and it's safe to signal // the CM thread, the flag's value in the policy has been reset. ! bool should_start_conc_mark = collector_state()->in_initial_mark_gc(); if (should_start_conc_mark) { _cm->gc_tracer_cm()->set_gc_cause(gc_cause()); } // Inner scope for scope based logging, timers, and stats collection --- 2941,2968 ---- trace_heap_before_gc(_gc_tracer_stw); _verifier->verify_region_sets_optional(); _verifier->verify_dirty_young_regions(); ! // We should not be doing concurrent start unless the concurrent mark thread is running if (!_cm_thread->should_terminate()) { ! // This call will decide whether this pause is a concurrent start ! // pause. If it is, in_concurrent_start_gc() will return true // for the duration of this pause. policy()->decide_on_conc_mark_initiation(); } // We do not allow concurrent start to be piggy-backed on a mixed GC. ! assert(!collector_state()->in_concurrent_start_gc() || collector_state()->in_young_only_phase(), "sanity"); // We also do not allow mixed GCs during marking. 
assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity"); ! // Record whether this pause is a concurrent start. When the current // thread has completed its logging output and it's safe to signal // the CM thread, the flag's value in the policy has been reset. ! bool should_start_conc_mark = collector_state()->in_concurrent_start_gc(); if (should_start_conc_mark) { _cm->gc_tracer_cm()->set_gc_cause(gc_cause()); } // Inner scope for scope based logging, timers, and stats collection
*** 3042,3052 **** if (should_start_conc_mark) { // We have to do this before we notify the CM threads that // they can start working to make sure that all the // appropriate initialization is done on the CM object. ! concurrent_mark()->post_initial_mark(); // Note that we don't actually trigger the CM thread at // this point. We do that later when we're sure that // the current thread has completed its logging output. } --- 3042,3052 ---- if (should_start_conc_mark) { // We have to do this before we notify the CM threads that // they can start working to make sure that all the // appropriate initialization is done on the CM object. ! concurrent_mark()->post_concurrent_start(); // Note that we don't actually trigger the CM thread at // this point. We do that later when we're sure that // the current thread has completed its logging output. }
*** 3531,3541 **** double ref_proc_time = os::elapsedTime() - ref_proc_start; phase_times()->record_ref_proc_time(ref_proc_time * 1000.0); } void G1CollectedHeap::make_pending_list_reachable() { ! if (collector_state()->in_initial_mark_gc()) { oop pll_head = Universe::reference_pending_list(); if (pll_head != NULL) { // Any valid worker id is fine here as we are in the VM thread and single-threaded. _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head); } --- 3531,3541 ---- double ref_proc_time = os::elapsedTime() - ref_proc_start; phase_times()->record_ref_proc_time(ref_proc_time * 1000.0); } void G1CollectedHeap::make_pending_list_reachable() { ! if (collector_state()->in_concurrent_start_gc()) { oop pll_head = Universe::reference_pending_list(); if (pll_head != NULL) { // Any valid worker id is fine here as we are in the VM thread and single-threaded. _cm->mark_in_next_bitmap(0 /* worker_id */, pll_head); }
*** 3720,3732 **** #if COMPILER2_OR_JVMCI DerivedPointerTable::clear(); #endif ! // InitialMark needs claim bits to keep track of the marked-through CLDs. ! if (collector_state()->in_initial_mark_gc()) { ! concurrent_mark()->pre_initial_mark(); double start_clear_claimed_marks = os::elapsedTime(); ClassLoaderDataGraph::clear_claimed_marks(); --- 3720,3732 ---- #if COMPILER2_OR_JVMCI DerivedPointerTable::clear(); #endif ! // Concurrent start needs claim bits to keep track of the marked-through CLDs. ! if (collector_state()->in_concurrent_start_gc()) { ! concurrent_mark()->pre_concurrent_start(); double start_clear_claimed_marks = os::elapsedTime(); ClassLoaderDataGraph::clear_claimed_marks();
*** 4790,4800 **** } else { assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type()); _survivor.add_used_bytes(allocated_bytes); } ! bool const during_im = collector_state()->in_initial_mark_gc(); if (during_im && allocated_bytes > 0) { _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top()); } _hr_printer.retire(alloc_region); } --- 4790,4800 ---- } else { assert(dest.is_young(), "Retiring alloc region should be young (%d)", dest.type()); _survivor.add_used_bytes(allocated_bytes); } ! bool const during_im = collector_state()->in_concurrent_start_gc(); if (during_im && allocated_bytes > 0) { _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top()); } _hr_printer.retire(alloc_region); }
< prev index next >