--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-06-30 15:59:22.603206984 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp 2020-06-30 15:59:22.503205788 +0200 @@ -1806,7 +1806,7 @@ // the regions in the collection set may be dotted around. // // * For the concurrent marking ref processor: - // * Reference discovery is enabled at initial marking. + // * Reference discovery is enabled at concurrent start. // * Reference discovery is disabled and the discovered // references processed etc during remarking. // * Reference discovery is MT (see below). @@ -2614,7 +2614,7 @@ // Update common counters. increment_total_collections(full /* full gc */); - if (full || collector_state()->in_initial_mark_gc()) { + if (full || collector_state()->in_concurrent_start_gc()) { increment_old_marking_cycles_started(); } @@ -2845,7 +2845,7 @@ } G1HeapVerifier::G1VerifyType G1CollectedHeap::young_collection_verify_type() const { - if (collector_state()->in_initial_mark_gc()) { + if (collector_state()->in_concurrent_start_gc()) { return G1HeapVerifier::G1VerifyConcurrentStart; } else if (collector_state()->in_young_only_phase()) { return G1HeapVerifier::G1VerifyYoungNormal; @@ -2890,7 +2890,7 @@ } const char* G1CollectedHeap::young_gc_name() const { - if (collector_state()->in_initial_mark_gc()) { + if (collector_state()->in_concurrent_start_gc()) { return "Pause Young (Concurrent Start)"; } else if (collector_state()->in_young_only_phase()) { if (collector_state()->in_young_gc_before_mixed()) { @@ -2943,24 +2943,24 @@ _verifier->verify_region_sets_optional(); _verifier->verify_dirty_young_regions(); - // We should not be doing initial mark unless the conc mark thread is running + // We should not be doing concurrent start unless the concurrent mark thread is running if (!_cm_thread->should_terminate()) { - // This call will decide whether this pause is an initial-mark - // pause. If it is, in_initial_mark_gc() will return true + // This call will decide whether this pause is a concurrent start + // pause. If it is, in_concurrent_start_gc() will return true // for the duration of this pause. policy()->decide_on_conc_mark_initiation(); } // We do not allow initial-mark to be piggy-backed on a mixed GC. - assert(!collector_state()->in_initial_mark_gc() || + assert(!collector_state()->in_concurrent_start_gc() || collector_state()->in_young_only_phase(), "sanity"); // We also do not allow mixed GCs during marking. assert(!collector_state()->mark_or_rebuild_in_progress() || collector_state()->in_young_only_phase(), "sanity"); - // Record whether this pause is an initial mark. When the current + // Record whether this pause is a concurrent start. When the current // thread has completed its logging output and it's safe to signal // the CM thread, the flag's value in the policy has been reset. - bool should_start_conc_mark = collector_state()->in_initial_mark_gc(); + bool should_start_conc_mark = collector_state()->in_concurrent_start_gc(); if (should_start_conc_mark) { _cm->gc_tracer_cm()->set_gc_cause(gc_cause()); } @@ -3044,7 +3044,7 @@ // We have to do this before we notify the CM threads that // they can start working to make sure that all the // appropriate initialization is done on the CM object. - concurrent_mark()->post_initial_mark(); + concurrent_mark()->post_concurrent_start(); // Note that we don't actually trigger the CM thread at // this point. We do that later when we're sure that // the current thread has completed its logging output. 
@@ -3533,7 +3533,7 @@ } void G1CollectedHeap::make_pending_list_reachable() { - if (collector_state()->in_initial_mark_gc()) { + if (collector_state()->in_concurrent_start_gc()) { oop pll_head = Universe::reference_pending_list(); if (pll_head != NULL) { // Any valid worker id is fine here as we are in the VM thread and single-threaded. @@ -3722,9 +3722,9 @@ DerivedPointerTable::clear(); #endif - // InitialMark needs claim bits to keep track of the marked-through CLDs. - if (collector_state()->in_initial_mark_gc()) { - concurrent_mark()->pre_initial_mark(); + // Concurrent start needs claim bits to keep track of the marked-through CLDs. + if (collector_state()->in_concurrent_start_gc()) { + concurrent_mark()->pre_concurrent_start(); double start_clear_claimed_marks = os::elapsedTime(); @@ -4792,7 +4792,7 @@ _survivor.add_used_bytes(allocated_bytes); } - bool const during_im = collector_state()->in_initial_mark_gc(); + bool const during_im = collector_state()->in_concurrent_start_gc(); if (during_im && allocated_bytes > 0) { _cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top()); } --- old/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-06-30 15:59:23.159213646 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.hpp 2020-06-30 15:59:23.059212449 +0200 @@ -533,7 +533,7 @@ // Process any reference objects discovered. void process_discovered_references(G1ParScanThreadStateSet* per_thread_states); - // If during an initial mark pause we may install a pending list head which is not + // If during a concurrent start pause we may install a pending list head which is not // otherwise reachable ensure that it is marked in the bitmap for concurrent marking // to discover. void make_pending_list_reachable(); @@ -856,7 +856,7 @@ // for the current GC (based upon the type of GC and which // command line flags are set); inline bool evacuation_failure_alot_for_gc_type(bool for_young_gc, - bool during_initial_mark, + bool during_concurrent_start, bool mark_or_rebuild_in_progress); inline void set_evacuation_failure_alot_for_current_gc(); @@ -916,7 +916,7 @@ // making the STW ref processor inactive by disabling discovery. // * Verify that the CM ref processor is still inactive // and no references have been placed on it's discovered - // lists (also checked as a precondition during initial marking). + // lists (also checked as a precondition during concurrent start). // The (stw) reference processor... ReferenceProcessor* _ref_processor_stw; --- old/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp 2020-06-30 15:59:23.639219398 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp 2020-06-30 15:59:23.539218198 +0200 @@ -193,13 +193,13 @@ inline bool G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc, - bool during_initial_mark, + bool during_concurrent_start, bool mark_or_rebuild_in_progress) { bool res = false; if (mark_or_rebuild_in_progress) { res |= G1EvacuationFailureALotDuringConcMark; } - if (during_initial_mark) { + if (during_concurrent_start) { res |= G1EvacuationFailureALotDuringInitialMark; } if (for_young_gc) { @@ -227,12 +227,12 @@ // Now check if G1EvacuationFailureALot is enabled for the current GC type. 
const bool in_young_only_phase = collector_state()->in_young_only_phase(); - const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc(); + const bool in_concurrent_start_gc = collector_state()->in_concurrent_start_gc(); const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress(); _evacuation_failure_alot_for_current_gc &= evacuation_failure_alot_for_gc_type(in_young_only_phase, - in_initial_mark_gc, + in_concurrent_start_gc, mark_or_rebuild_in_progress); } } --- old/src/hotspot/share/gc/g1/g1CollectorState.hpp 2020-06-30 15:59:24.079224668 +0200 +++ new/src/hotspot/share/gc/g1/g1CollectorState.hpp 2020-06-30 15:59:23.983223518 +0200 @@ -40,28 +40,28 @@ // If _initiate_conc_mark_if_possible is set at the beginning of a // pause, it is a suggestion that the pause should start a marking - // cycle by doing the initial-mark work. However, it is possible + // cycle by doing the concurrent start work. However, it is possible // that the concurrent marking thread is still finishing up the // previous marking cycle (e.g., clearing the next marking // bitmap). If that is the case we cannot start a new cycle and // we'll have to wait for the concurrent marking thread to finish // what it is doing. In this case we will postpone the marking cycle // initiation decision for the next pause. When we eventually decide - // to start a cycle, we will set _in_initial_mark_gc which - // will stay true until the end of the initial-mark pause doing the - // initial-mark work. - volatile bool _in_initial_mark_gc; + // to start a cycle, we will set _in_concurrent_start_gc which + // will stay true until the end of the concurrent start pause doing the + // concurrent start work. + volatile bool _in_concurrent_start_gc; // At the end of a pause we check the heap occupancy and we decide // whether we will start a marking cycle during the next pause. If // we decide that we want to do that, set this parameter. This parameter will // stay set until the beginning of a subsequent pause (not necessarily // the next one) when we decide that we will indeed start a marking cycle and - // do the initial-mark work. + // do the concurrent start phase work. volatile bool _initiate_conc_mark_if_possible; // Marking or rebuilding remembered set work is in progress. Set from the end - // of the initial mark pause to the end of the Cleanup pause. + // of the concurrent start pause to the end of the Cleanup pause. bool _mark_or_rebuild_in_progress; // The next bitmap is currently being cleared or about to be cleared. 
TAMS and bitmap @@ -76,7 +76,7 @@ _in_young_only_phase(true), _in_young_gc_before_mixed(false), - _in_initial_mark_gc(false), + _in_concurrent_start_gc(false), _initiate_conc_mark_if_possible(false), _mark_or_rebuild_in_progress(false), @@ -88,7 +88,7 @@ // Pause setters void set_in_young_gc_before_mixed(bool v) { _in_young_gc_before_mixed = v; } - void set_in_initial_mark_gc(bool v) { _in_initial_mark_gc = v; } + void set_in_concurrent_start_gc(bool v) { _in_concurrent_start_gc = v; } void set_in_full_gc(bool v) { _in_full_gc = v; } void set_initiate_conc_mark_if_possible(bool v) { _initiate_conc_mark_if_possible = v; } @@ -103,7 +103,7 @@ // Specific pauses bool in_young_gc_before_mixed() const { return _in_young_gc_before_mixed; } bool in_full_gc() const { return _in_full_gc; } - bool in_initial_mark_gc() const { return _in_initial_mark_gc; } + bool in_concurrent_start_gc() const { return _in_concurrent_start_gc; } bool initiate_conc_mark_if_possible() const { return _initiate_conc_mark_if_possible; } @@ -111,8 +111,8 @@ bool clearing_next_bitmap() const { return _clearing_next_bitmap; } G1YCType yc_type() const { - if (in_initial_mark_gc()) { - return InitialMark; + if (in_concurrent_start_gc()) { + return ConcurrentStart; } else if (mark_or_rebuild_in_progress()) { return DuringMarkOrRebuild; } else if (in_young_only_phase()) { --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-06-30 15:59:24.523229989 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp 2020-06-30 15:59:24.423228789 +0200 @@ -683,7 +683,7 @@ } }; -void G1ConcurrentMark::pre_initial_mark() { +void G1ConcurrentMark::pre_concurrent_start() { assert_at_safepoint_on_vm_thread(); // Reset marking state. @@ -697,7 +697,7 @@ } -void G1ConcurrentMark::post_initial_mark() { +void G1ConcurrentMark::post_concurrent_start() { // Start Concurrent Marking weak-reference discovery. ReferenceProcessor* rp = _g1h->ref_processor_cm(); // enable ("weak") refs discovery @@ -714,7 +714,7 @@ // update_g1_committed() will be called at the end of an evac pause // when marking is on. So, it's also called at the end of the - // initial-mark pause to update the heap end, if the heap expands + // concurrent start pause to update the heap end, if the heap expands // during it. No need to call it here. } @@ -2411,7 +2411,7 @@ (1) Marking Bitmap. If there are gray objects that appear only on the bitmap (this happens either when dealing with an overflow - or when the initial marking phase has simply marked the roots + or when the concurrent start pause has simply marked the roots and didn't push them on the stack), then tasks claim heap regions whose bitmap they then scan to find gray objects. A global finger indicates where the end of the last claimed region --- old/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2020-06-30 15:59:25.055236361 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp 2020-06-30 15:59:24.959235211 +0200 @@ -220,8 +220,8 @@ // roots wrt to the marking. They must be scanned before marking to maintain the // SATB invariant. // Typically they contain the areas from nTAMS to top of the regions. -// We could scan and mark through these objects during the initial-mark pause, but for -// pause time reasons we move this work to the concurrent phase. +// We could scan and mark through these objects during the concurrent start pause, +// but for pause time reasons we move this work to the concurrent phase. 
// We need to complete this procedure before the next GC because it might determine // that some of these "root objects" are dead, potentially dropping some required // references. @@ -384,7 +384,7 @@ void clear_statistics(HeapRegion* r); // Resets the global marking data structures, as well as the - // task local ones; should be called during initial mark. + // task local ones; should be called during concurrent start. void reset(); // Resets all the marking data structures. Called when we have to restart @@ -435,7 +435,7 @@ // Returns the task with the given id G1CMTask* task(uint id) { - // During initial mark we use the parallel gc threads to do some work, so + // During concurrent start we use the parallel gc threads to do some work, so // we can only compare against _max_num_tasks. assert(id < _max_num_tasks, "Task id %u not within bounds up to %u", id, _max_num_tasks); return _tasks[id]; @@ -541,9 +541,9 @@ void clear_prev_bitmap(WorkGang* workers); // These two methods do the work that needs to be done at the start and end of the - // initial mark pause. - void pre_initial_mark(); - void post_initial_mark(); + // concurrent start pause. + void pre_concurrent_start(); + void post_concurrent_start(); // Scan all the root regions and mark everything reachable from // them. --- old/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp 2020-06-30 15:59:25.523241966 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.hpp 2020-06-30 15:59:25.423240769 +0200 @@ -76,7 +76,7 @@ bool in_progress() { return _state == InProgress; } // Returns true from the moment a marking cycle is - // initiated (during the initial-mark pause when started() is set) + // initiated (during the concurrent start pause when started() is set) // to the moment when the cycle completes (just after the next // marking bitmap has been cleared and in_progress() is // cleared). While during_cycle() is true we will not start another cycle --- old/src/hotspot/share/gc/g1/g1EvacFailure.cpp 2020-06-30 15:59:25.987247525 +0200 +++ new/src/hotspot/share/gc/g1/g1EvacFailure.cpp 2020-06-30 15:59:25.887246328 +0200 @@ -79,21 +79,21 @@ HeapRegion* _hr; size_t _marked_bytes; UpdateLogBuffersDeferred* _log_buffer_cl; - bool _during_initial_mark; + bool _during_concurrent_start; uint _worker_id; HeapWord* _last_forwarded_object_end; public: RemoveSelfForwardPtrObjClosure(HeapRegion* hr, UpdateLogBuffersDeferred* log_buffer_cl, - bool during_initial_mark, + bool during_concurrent_start, uint worker_id) : _g1h(G1CollectedHeap::heap()), _cm(_g1h->concurrent_mark()), _hr(hr), _marked_bytes(0), _log_buffer_cl(log_buffer_cl), - _during_initial_mark(during_initial_mark), + _during_concurrent_start(during_concurrent_start), _worker_id(worker_id), _last_forwarded_object_end(hr->bottom()) { } @@ -119,14 +119,14 @@ if (!_cm->is_marked_in_prev_bitmap(obj)) { _cm->mark_in_prev_bitmap(obj); } - if (_during_initial_mark) { + if (_during_concurrent_start) { // For the next marking info we'll only mark the // self-forwarded objects explicitly if we are during - // initial-mark (since, normally, we only mark objects pointed + // concurrent start (since, normally, we only mark objects pointed // to by roots if we succeed in copying them). By marking all // self-forwarded objects we ensure that we mark any that are // still pointed to be roots. 
During concurrent marking, and - // after initial-mark, we don't need to mark any objects + // after concurrent start, we don't need to mark any objects // explicitly and all objects in the CSet are considered // (implicitly) live. So, we won't mark them explicitly and // we'll leave them over NTAMS. @@ -211,10 +211,10 @@ } size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr, - bool during_initial_mark) { + bool during_concurrent_start) { RemoveSelfForwardPtrObjClosure rspc(hr, &_log_buffer_cl, - during_initial_mark, + during_concurrent_start, _worker_id); hr->object_iterate(&rspc); // Need to zap the remainder area of the processed region. @@ -230,16 +230,16 @@ if (hr->evacuation_failed()) { hr->clear_index_in_opt_cset(); - bool during_initial_mark = _g1h->collector_state()->in_initial_mark_gc(); - bool during_conc_mark = _g1h->collector_state()->mark_or_rebuild_in_progress(); + bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc(); + bool during_concurrent_mark = _g1h->collector_state()->mark_or_rebuild_in_progress(); - hr->note_self_forwarding_removal_start(during_initial_mark, - during_conc_mark); + hr->note_self_forwarding_removal_start(during_concurrent_start, + during_concurrent_mark); _g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr); hr->reset_bot(); - size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_initial_mark); + size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start); hr->rem_set()->clean_strong_code_roots(hr); hr->rem_set()->clear_locked(true); --- old/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-06-30 15:59:26.451253081 +0200 +++ new/src/hotspot/share/gc/g1/g1IHOPControl.hpp 2020-06-30 15:59:26.355251933 +0200 @@ -51,7 +51,7 @@ // occupancy will be updated at the first heap expansion. G1IHOPControl(double initial_ihop_percent); - // Most recent time from the end of the initial mark to the start of the first + // Most recent time from the end of the concurrent start to the start of the first // mixed gc. virtual double last_marking_length_s() const = 0; public: @@ -71,7 +71,7 @@ // difference between old gen size and total heap size at the start of reclamation, // and space required for that reclamation. virtual void update_allocation_info(double allocation_time_s, size_t allocated_bytes, size_t additional_buffer_size); - // Update the time spent in the mutator beginning from the end of initial mark to + // Update the time spent in the mutator beginning from the end of concurrent start to // the first mixed gc. virtual void update_marking_length(double marking_length_s) = 0; @@ -82,7 +82,7 @@ // The returned concurrent mark starting occupancy threshold is a fixed value // relative to the maximum heap size. class G1StaticIHOPControl : public G1IHOPControl { - // Most recent mutator time between the end of initial mark to the start of the + // Most recent mutator time from the end of concurrent start to the start of the // first mixed gc. double _last_marking_length_s; protected: @@ -104,7 +104,7 @@ // This algorithm tries to return a concurrent mark starting occupancy value that // makes sure that during marking the given target occupancy is never exceeded, // based on predictions of current allocation rate and time periods between -// initial mark and the first mixed gc. +// concurrent start and the first mixed gc. 
class G1AdaptiveIHOPControl : public G1IHOPControl { size_t _heap_reserve_percent; // Percentage of maximum heap capacity we should avoid to touch size_t _heap_waste_percent; // Percentage of free heap that should be considered as waste. --- old/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp 2020-06-30 15:59:26.911258590 +0200 +++ new/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp 2020-06-30 15:59:26.815257442 +0200 @@ -251,7 +251,7 @@ } // The object is not in collection set. If we're a root scanning - // closure during an initial mark pause then attempt to mark the object. + // closure during a concurrent start pause then attempt to mark the object. if (do_mark_object == G1MarkFromRoot) { mark_object(obj); } --- old/src/hotspot/share/gc/g1/g1Policy.cpp 2020-06-30 15:59:27.363264004 +0200 +++ new/src/hotspot/share/gc/g1/g1Policy.cpp 2020-06-30 15:59:27.259262758 +0200 @@ -75,7 +75,7 @@ _rs_length_prediction(0), _pending_cards_at_gc_start(0), _old_gen_alloc_tracker(), - _initial_mark_to_mixed(), + _concurrent_start_to_mixed(), _collection_set(NULL), _g1h(NULL), _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)), @@ -560,7 +560,7 @@ collector_state()->set_in_young_only_phase(true); collector_state()->set_in_young_gc_before_mixed(false); collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0)); - collector_state()->set_in_initial_mark_gc(false); + collector_state()->set_in_concurrent_start_gc(false); collector_state()->set_mark_or_rebuild_in_progress(false); collector_state()->set_clearing_next_bitmap(false); @@ -656,7 +656,7 @@ void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) { assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now"); - collector_state()->set_in_initial_mark_gc(false); + collector_state()->set_in_concurrent_start_gc(false); } void G1Policy::record_concurrent_mark_remark_start() { @@ -744,8 +744,7 @@ double end_time_sec = os::elapsedTime(); - bool this_pause_included_initial_mark = false; - bool this_pause_was_young_only = collector_state()->in_young_only_phase(); + PauseKind this_pause = young_gc_pause_kind(); bool update_stats = !_g1h->evacuation_failed(); @@ -753,8 +752,7 @@ _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; - this_pause_included_initial_mark = collector_state()->in_initial_mark_gc(); - if (this_pause_included_initial_mark) { + if (is_concurrent_start_pause(this_pause)) { record_concurrent_mark_init_end(0.0); } else { maybe_start_marking(); } @@ -785,14 +783,14 @@ _analytics->update_recent_gc_times(end_time_sec, pause_time_ms); } - if (collector_state()->in_young_gc_before_mixed()) { - assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC"); + if (is_last_young_pause(this_pause)) { + assert(!is_concurrent_start_pause(this_pause), "The young GC before mixed is not allowed to be a concurrent start GC"); // This has been the young GC before we start doing mixed GCs. We already // decided to start mixed GCs much earlier, so there is nothing to do except // advancing the state. collector_state()->set_in_young_only_phase(false); collector_state()->set_in_young_gc_before_mixed(false); - } else if (!this_pause_was_young_only) { + } else if (!is_young_only_pause(this_pause)) { // This is a mixed GC. Here we decide whether to continue doing more // mixed GCs or not. 
if (!next_gc_should_be_mixed("continue mixed GCs", @@ -825,7 +823,8 @@ average_time_ms(G1GCPhaseTimes::MergeHCC) + average_time_ms(G1GCPhaseTimes::MergeLB) + average_time_ms(G1GCPhaseTimes::OptMergeRS); - _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only); + _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, + is_young_only_pause(this_pause)); } // Update prediction for card scan @@ -836,7 +835,8 @@ double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR); - _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only); + _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, + is_young_only_pause(this_pause)); } // Update prediction for the ratio between cards from the remembered @@ -850,7 +850,8 @@ if (total_cards_scanned > 0) { merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned; } - _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only); + _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, + is_young_only_pause(this_pause)); const size_t recorded_rs_length = _collection_set->recorded_rs_length(); const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0; @@ -880,15 +881,15 @@ // these are is wildly different to during young only gc and mess up young gen sizing right // after the mixed gc phase. // During mixed gc we do not use them for young gen sizing. - if (this_pause_was_young_only) { + if (is_young_only_pause(this_pause)) { _analytics->report_pending_cards((double) _pending_cards_at_gc_start); _analytics->report_rs_length((double) _rs_length); } } - assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()), - "If the last pause has been an initial mark, we should not have been in the marking window"); - if (this_pause_included_initial_mark) { + assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()), + "If the last pause has been concurrent start, we should not have been in the marking window"); + if (is_concurrent_start_pause(this_pause)) { collector_state()->set_mark_or_rebuild_in_progress(true); } @@ -904,7 +905,7 @@ _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0); update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(), _old_gen_alloc_tracker.last_cycle_old_bytes(), - this_pause_was_young_only); + is_young_only_pause(this_pause)); _ihop_control->send_trace_event(_g1h->gc_tracer_stw()); } else { @@ -914,7 +915,7 @@ // for completing the marking, i.e. are faster than expected. // This skews the predicted marking length towards smaller values which might cause // the mark start being too late. - _initial_mark_to_mixed.reset(); + _concurrent_start_to_mixed.reset(); } // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 
@@ -964,10 +965,10 @@ bool report = false; double marking_to_mixed_time = -1.0; - if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) { - marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time(); + if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) { + marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time(); assert(marking_to_mixed_time > 0.0, - "Initial mark to mixed time must be larger than zero but is %.3f", + "Concurrent start to mixed time must be larger than zero but is %.3f", marking_to_mixed_time); if (marking_to_mixed_time > min_valid_time) { _ihop_control->update_marking_length(marking_to_mixed_time); @@ -1129,35 +1130,39 @@ _g1h->num_free_or_available_regions()); } -bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) { +bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause) { // We actually check whether we are marking here and not if we are in a // reclamation phase. This means that we will schedule a concurrent mark // even while we are still in the process of reclaiming memory. bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle(); if (!during_cycle) { - log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause)); + log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). " + "GC cause: %s", + GCCause::to_string(gc_cause)); collector_state()->set_initiate_conc_mark_if_possible(true); return true; } else { - log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause)); + log_debug(gc, ergo)("Do not request concurrent cycle initiation " + "(concurrent cycle already in progress). GC cause: %s", + GCCause::to_string(gc_cause)); return false; } } void G1Policy::initiate_conc_mark() { - collector_state()->set_in_initial_mark_gc(true); + collector_state()->set_in_concurrent_start_gc(true); collector_state()->set_initiate_conc_mark_if_possible(false); } void G1Policy::decide_on_conc_mark_initiation() { - // We are about to decide on whether this pause will be an - // initial-mark pause. + // We are about to decide on whether this pause will be a + // concurrent start pause. - // First, collector_state()->in_initial_mark_gc() should not be already set. We + // First, collector_state()->in_concurrent_start_gc() should not be already set. We // will set it here if we have to. However, it should be cleared by - // the end of the pause (it's only set for the duration of an - // initial-mark pause). - assert(!collector_state()->in_initial_mark_gc(), "pre-condition"); + // the end of the pause (it's only set for the duration of a + // concurrent start pause). + assert(!collector_state()->in_concurrent_start_gc(), "pre-condition"); if (collector_state()->initiate_conc_mark_if_possible()) { // We had noticed on a previous pause that the heap occupancy has @@ -1171,13 +1176,13 @@ ConcurrentGCBreakpoints::is_controlled()) { log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)"); } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) { - // Initiate a new initial mark if there is no marking or reclamation going on. + // Initiate a new concurrent start if there is no marking or reclamation going on. 
initiate_conc_mark(); log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)"); } else if (_g1h->is_user_requested_concurrent_full_gc(cause) || (cause == GCCause::_wb_breakpoint)) { - // Initiate a user requested initial mark or run_to a breakpoint. - // An initial mark must be young only GC, so the collector state + // Initiate a user requested concurrent start or run to a breakpoint. + // A concurrent start must be young only GC, so the collector state // must be updated to reflect this. collector_state()->set_in_young_only_phase(true); collector_state()->set_in_young_gc_before_mixed(false); @@ -1257,20 +1262,35 @@ } } +bool G1Policy::is_young_only_pause(PauseKind kind) { + assert(kind != FullGC, "must be"); + assert(kind != Remark, "must be"); + assert(kind != Cleanup, "must be"); + return kind == ConcurrentStartGC || kind == LastYoungGC || kind == YoungOnlyGC; +} + +bool G1Policy::is_last_young_pause(PauseKind kind) { + return kind == LastYoungGC; +} + +bool G1Policy::is_concurrent_start_pause(PauseKind kind) { + return kind == ConcurrentStartGC; +} + G1Policy::PauseKind G1Policy::young_gc_pause_kind() const { assert(!collector_state()->in_full_gc(), "must be"); - if (collector_state()->in_initial_mark_gc()) { + if (collector_state()->in_concurrent_start_gc()) { assert(!collector_state()->in_young_gc_before_mixed(), "must be"); - return InitialMarkGC; + return ConcurrentStartGC; } else if (collector_state()->in_young_gc_before_mixed()) { - assert(!collector_state()->in_initial_mark_gc(), "must be"); + assert(!collector_state()->in_concurrent_start_gc(), "must be"); return LastYoungGC; } else if (collector_state()->in_mixed_phase()) { - assert(!collector_state()->in_initial_mark_gc(), "must be"); + assert(!collector_state()->in_concurrent_start_gc(), "must be"); assert(!collector_state()->in_young_gc_before_mixed(), "must be"); return MixedGC; } else { - assert(!collector_state()->in_initial_mark_gc(), "must be"); + assert(!collector_state()->in_concurrent_start_gc(), "must be"); assert(!collector_state()->in_young_gc_before_mixed(), "must be"); return YoungOnlyGC; } @@ -1281,7 +1301,7 @@ if (kind != FullGC) { _mmu_tracker->add_pause(start, end); } - // Manage the mutator time tracking from initial mark to first mixed gc. + // Manage the mutator time tracking from concurrent start to first mixed gc. 
switch (kind) { case FullGC: abort_time_to_mixed_tracking(); break; @@ -1290,15 +1310,15 @@ case Remark: case YoungOnlyGC: case LastYoungGC: - _initial_mark_to_mixed.add_pause(end - start); + _concurrent_start_to_mixed.add_pause(end - start); break; - case InitialMarkGC: + case ConcurrentStartGC: if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) { - _initial_mark_to_mixed.record_initial_mark_end(end); + _concurrent_start_to_mixed.record_concurrent_start_end(end); } break; case MixedGC: - _initial_mark_to_mixed.record_mixed_gc_start(start); + _concurrent_start_to_mixed.record_mixed_gc_start(start); break; default: ShouldNotReachHere(); @@ -1306,7 +1326,7 @@ } void G1Policy::abort_time_to_mixed_tracking() { - _initial_mark_to_mixed.reset(); + _concurrent_start_to_mixed.reset(); } bool G1Policy::next_gc_should_be_mixed(const char* true_action_str, --- old/src/hotspot/share/gc/g1/g1Policy.hpp 2020-06-30 15:59:27.867270041 +0200 +++ new/src/hotspot/share/gc/g1/g1Policy.hpp 2020-06-30 15:59:27.775268939 +0200 @@ -28,7 +28,7 @@ #include "gc/g1/g1CollectorState.hpp" +#include "gc/g1/g1ConcurrentStartToMixedTimeTracker.hpp" #include "gc/g1/g1GCPhaseTimes.hpp" #include "gc/g1/g1HeapRegionAttr.hpp" -#include "gc/g1/g1InitialMarkToMixedTimeTracker.hpp" #include "gc/g1/g1MMUTracker.hpp" #include "gc/g1/g1OldGenAllocationTracker.hpp" #include "gc/g1/g1RemSetTrackingPolicy.hpp" @@ -106,7 +106,7 @@ // two GCs. G1OldGenAllocationTracker _old_gen_alloc_tracker; - G1InitialMarkToMixedTimeTracker _initial_mark_to_mixed; + G1ConcurrentStartToMixedTimeTracker _concurrent_start_to_mixed; bool should_update_surv_rate_group_predictors() { return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress(); @@ -268,11 +268,14 @@ YoungOnlyGC, MixedGC, LastYoungGC, - InitialMarkGC, + ConcurrentStartGC, Cleanup, Remark }; + static bool is_young_only_pause(PauseKind kind); + static bool is_last_young_pause(PauseKind kind); + static bool is_concurrent_start_pause(PauseKind kind); // Calculate PauseKind from internal state. PauseKind young_gc_pause_kind() const; // Record the given STW pause with the given start and end times (in s). @@ -358,14 +361,14 @@ // new cycle, as long as we are not already in one. It's best if it // is called during a safepoint when the test whether a cycle is in // progress or not is stable. - bool force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause); + bool force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause); // This is called at the very beginning of an evacuation pause (it // has to be the first thing that the pause does). If // initiate_conc_mark_if_possible() is true, and the concurrent // marking thread has completed its work during the previous cycle, - // it will set in_initial_mark_gc() to so that the pause does - // the initial-mark work and start a marking cycle. + // it will set in_concurrent_start_gc() so that the pause does + // the concurrent start work and starts a marking cycle. void decide_on_conc_mark_initiation(); uint young_list_desired_length() const { return _young_list_desired_length; } --- old/src/hotspot/share/gc/g1/g1RootClosures.cpp 2020-06-30 15:59:28.335275644 +0200 +++ new/src/hotspot/share/gc/g1/g1RootClosures.cpp 2020-06-30 15:59:28.239274496 +0200 @@ -47,17 +47,17 @@ CodeBlobClosure* weak_codeblobs() { return &_closures._codeblobs; } }; -// Closures used during initial mark. +// Closures used during concurrent start. 
// The treatment of "weak" roots is selectable through the template parameter, // this is usually used to control unloading of classes and interned strings. template -class G1InitialMarkClosures : public G1EvacuationRootClosures { +class G1ConcurrentStartMarkClosures : public G1EvacuationRootClosures { G1SharedClosures _strong; G1SharedClosures _weak; public: - G1InitialMarkClosures(G1CollectedHeap* g1h, - G1ParScanThreadState* pss) : + G1ConcurrentStartMarkClosures(G1CollectedHeap* g1h, + G1ParScanThreadState* pss) : _strong(g1h, pss, /* process_only_dirty_klasses */ false), _weak(g1h, pss, /* process_only_dirty_klasses */ false) {} @@ -73,11 +73,11 @@ G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) { G1EvacuationRootClosures* res = NULL; - if (g1h->collector_state()->in_initial_mark_gc()) { + if (g1h->collector_state()->in_concurrent_start_gc()) { if (ClassUnloadingWithConcurrentMark) { - res = new G1InitialMarkClosures(g1h, pss); + res = new G1ConcurrentStartMarkClosures(g1h, pss); } else { - res = new G1InitialMarkClosures(g1h, pss); + res = new G1ConcurrentStartMarkClosures(g1h, pss); } } else { res = new G1EvacuationClosures(g1h, pss, g1h->collector_state()->in_young_only_phase()); --- old/src/hotspot/share/gc/g1/g1SharedClosures.hpp 2020-06-30 15:59:28.775280914 +0200 +++ new/src/hotspot/share/gc/g1/g1SharedClosures.hpp 2020-06-30 15:59:28.679279763 +0200 @@ -34,7 +34,7 @@ class G1SharedClosures { static bool needs_strong_processing() { // Request strong code root processing when G1MarkFromRoot is passed in during - // initial mark. + // concurrent start. return Mark == G1MarkFromRoot; } public: --- old/src/hotspot/share/gc/g1/g1VMOperations.cpp 2020-06-30 15:59:29.227286325 +0200 +++ new/src/hotspot/share/gc/g1/g1VMOperations.cpp 2020-06-30 15:59:29.127285128 +0200 @@ -57,11 +57,11 @@ bool VM_G1TryInitiateConcMark::doit_prologue() { bool result = VM_GC_Operation::doit_prologue(); // The prologue can fail for a couple of reasons. The first is that another GC - // got scheduled and prevented the scheduling of the initial mark GC. The + // got scheduled and prevented the scheduling of the concurrent start GC. The // second is that the GC locker may be active and the heap can't be expanded. - // In both cases we want to retry the GC so that the initial mark pause is + // In both cases we want to retry the GC so that the concurrent start pause is // actually scheduled. In the second case, however, we should stall until - // until the GC locker is no longer active and then retry the initial mark GC. + // the GC locker is no longer active and then retry the concurrent start GC. if (!result) _transient_failure = true; return result; } @@ -80,15 +80,15 @@ // a young-only or mixed GC (depending on phase). For a user request // there's no point in even doing that much, so done. For some non-user // requests the alternative GC might still be needed. - } else if (!g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause)) { - // Failure to force the next GC pause to be an initial mark indicates + } else if (!g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause)) { + // Failure to force the next GC pause to be a concurrent start indicates // there is already a concurrent marking cycle in progress. Set flag // to notify the caller and return immediately. 
_cycle_already_in_progress = true; } else if ((_gc_cause != GCCause::_wb_breakpoint) && ConcurrentGCBreakpoints::is_controlled()) { // WhiteBox wants to be in control of concurrent cycles, so don't try to - // start one. This check is after the force_initial_mark_xxx so that a + // start one. This check is after the force_concurrent_start_xxx so that a // request will be remembered for a later partial collection, even though // we've rejected this request. _whitebox_attached = true; --- old/src/hotspot/share/gc/g1/g1YCTypes.hpp 2020-06-30 15:59:29.679291737 +0200 +++ new/src/hotspot/share/gc/g1/g1YCTypes.hpp 2020-06-30 15:59:29.583290589 +0200 @@ -29,7 +29,7 @@ enum G1YCType { Normal, - InitialMark, + ConcurrentStart, DuringMarkOrRebuild, Mixed, G1YCTypeEndSentinel @@ -40,7 +40,7 @@ static const char* to_string(G1YCType type) { switch(type) { case Normal: return "Normal"; - case InitialMark: return "Initial Mark"; + case ConcurrentStart: return "Concurrent Start"; case DuringMarkOrRebuild: return "During Mark"; case Mixed: return "Mixed"; default: ShouldNotReachHere(); return NULL; --- old/src/hotspot/share/gc/g1/g1_globals.hpp 2020-06-30 15:59:30.119297007 +0200 +++ new/src/hotspot/share/gc/g1/g1_globals.hpp 2020-06-30 15:59:30.027295905 +0200 @@ -52,7 +52,7 @@ "behavior.") \ \ experimental(size_t, G1AdaptiveIHOPNumInitialSamples, 3, \ - "How many completed time periods from initial mark to first " \ + "How many completed time periods from concurrent start to first " \ "mixed gc are required to use the input values for prediction " \ "of the optimal occupancy to start marking.") \ range(1, max_intx) \ @@ -272,8 +272,8 @@ "pauses when marking is in progress") \ \ develop(bool, G1EvacuationFailureALotDuringInitialMark, true, \ - "Force use of evacuation failure handling during initial mark " \ - "evacuation pauses") \ + "Force use of evacuation failure handling during concurrent " \ + "start evacuation pauses") \ \ develop(bool, G1EvacuationFailureALotDuringYoungGC, true, \ "Force use of evacuation failure handling during young " \ --- old/src/hotspot/share/gc/g1/heapRegion.cpp 2020-06-30 15:59:30.583302561 +0200 +++ new/src/hotspot/share/gc/g1/heapRegion.cpp 2020-06-30 15:59:30.483301363 +0200 @@ -285,15 +285,15 @@ used()); } -void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, +void HeapRegion::note_self_forwarding_removal_start(bool during_concurrent_start, bool during_conc_mark) { // We always recreate the prev marking info and we'll explicitly // mark all objects we find to be self-forwarded on the prev // bitmap. So all objects need to be below PTAMS. _prev_marked_bytes = 0; - if (during_initial_mark) { - // During initial-mark, we'll also explicitly mark all objects + if (during_concurrent_start) { + // During concurrent start, we'll also explicitly mark all objects // we find to be self-forwarded on the next bitmap. So all // objects need to be below NTAMS. _next_top_at_mark_start = top(); --- old/src/hotspot/share/gc/g1/heapRegion.hpp 2020-06-30 15:59:31.039308019 +0200 +++ new/src/hotspot/share/gc/g1/heapRegion.hpp 2020-06-30 15:59:30.939306822 +0200 @@ -511,7 +511,7 @@ // Notify the region that we are about to start processing // self-forwarded objects during evac failure handling. 
- void note_self_forwarding_removal_start(bool during_initial_mark, + void note_self_forwarding_removal_start(bool during_concurrent_start, bool during_conc_mark); // Notify the region that we have finished processing self-forwarded --- old/src/hotspot/share/gc/shared/gcVMOperations.cpp 2020-06-30 15:59:31.507313623 +0200 +++ new/src/hotspot/share/gc/shared/gcVMOperations.cpp 2020-06-30 15:59:31.407312426 +0200 @@ -198,7 +198,7 @@ // At this point we are supposed to start a concurrent cycle. We // will do so if one is not already in progress. - bool should_start = g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause); + bool should_start = g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause); if (should_start) { double pause_target = g1h->policy()->max_pause_time_ms(); --- old/src/hotspot/share/jfr/metadata/metadata.xml 2020-06-30 15:59:31.959319033 +0200 +++ new/src/hotspot/share/jfr/metadata/metadata.xml 2020-06-30 15:59:31.859317836 +0200 @@ -368,7 +368,7 @@ description="Mutator allocation during mutator operation in the most recent interval" /> - + + description="Current predicted time from the end of the last concurrent start to the first mixed GC" /> --- old/test/hotspot/jtreg/gc/g1/TestEagerReclaimHumongousRegionsClearMarkBits.java 2020-06-30 15:59:32.447324875 +0200 +++ new/test/hotspot/jtreg/gc/g1/TestEagerReclaimHumongousRegionsClearMarkBits.java 2020-06-30 15:59:32.343323631 +0200 @@ -124,7 +124,7 @@ "-Xmx128M", "-Xmn2M", "-XX:G1HeapRegionSize=1M", - "-XX:InitiatingHeapOccupancyPercent=0", // Want to have as much as possible initial marks. + "-XX:InitiatingHeapOccupancyPercent=0", // Want to have as many marking cycles as possible. "-Xlog:gc", "-XX:+UnlockDiagnosticVMOptions", "-XX:+VerifyAfterGC", --- old/test/hotspot/jtreg/gc/g1/TestRemsetLoggingTools.java 2020-06-30 15:59:32.895330238 +0200 +++ new/test/hotspot/jtreg/gc/g1/TestRemsetLoggingTools.java 2020-06-30 15:59:32.795329041 +0200 @@ -63,7 +63,7 @@ "-Xms20m", "-Xmx20m", "-XX:ParallelGCThreads=1", - "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to initial marking + "-XX:InitiatingHeapOccupancyPercent=100", // we don't want the additional GCs due to marking "-XX:+UnlockDiagnosticVMOptions", "-XX:G1HeapRegionSize=1M", }; --- old/src/hotspot/share/gc/g1/g1InitialMarkToMixedTimeTracker.hpp 2020-06-30 15:59:33.383336078 +0200 +++ /dev/null 2020-06-30 09:13:23.392971575 +0200 @@ -1,87 +0,0 @@ -/* - * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -#ifndef SHARE_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP -#define SHARE_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP - -#include "utilities/globalDefinitions.hpp" -#include "utilities/debug.hpp" - -// Used to track time from the end of initial mark to the first mixed GC. -// After calling the initial mark/mixed gc notifications, the result can be -// obtained in last_marking_time() once, after which the tracking resets. -// Any pauses recorded by add_pause() will be subtracted from that results. -class G1InitialMarkToMixedTimeTracker { -private: - bool _active; - double _initial_mark_end_time; - double _mixed_start_time; - double _total_pause_time; - - double wall_time() const { - return _mixed_start_time - _initial_mark_end_time; - } -public: - G1InitialMarkToMixedTimeTracker() { reset(); } - - // Record initial mark pause end, starting the time tracking. - void record_initial_mark_end(double end_time) { - assert(!_active, "Initial mark out of order."); - _initial_mark_end_time = end_time; - _active = true; - } - - // Record the first mixed gc pause start, ending the time tracking. - void record_mixed_gc_start(double start_time) { - if (_active) { - _mixed_start_time = start_time; - _active = false; - } - } - - double last_marking_time() { - assert(has_result(), "Do not have all measurements yet."); - double result = (_mixed_start_time - _initial_mark_end_time) - _total_pause_time; - reset(); - return result; - } - - void reset() { - _active = false; - _total_pause_time = 0.0; - _initial_mark_end_time = -1.0; - _mixed_start_time = -1.0; - } - - void add_pause(double time) { - if (_active) { - _total_pause_time += time; - } - } - - // Returns whether we have a result that can be retrieved. - bool has_result() const { return _mixed_start_time > 0.0 && _initial_mark_end_time > 0.0; } -}; - -#endif // SHARE_GC_G1_G1INITIALMARKTOMIXEDTIMETRACKER_HPP --- /dev/null 2020-06-30 09:13:23.392971575 +0200 +++ new/src/hotspot/share/gc/g1/g1ConcurrentStartToMixedTimeTracker.hpp 2020-06-30 15:59:33.239334355 +0200 @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_GC_G1_G1CONCURRENTSTARTTOMIXEDTIMETRACKER_HPP +#define SHARE_GC_G1_G1CONCURRENTSTARTTOMIXEDTIMETRACKER_HPP + +#include "utilities/globalDefinitions.hpp" +#include "utilities/debug.hpp" + +// Used to track time from the end of concurrent start to the first mixed GC. 
+// After calling the concurrent start/mixed gc notifications, the result can be +// obtained in last_marking_time() once, after which the tracking resets. +// Any pauses recorded by add_pause() will be subtracted from that result. +class G1ConcurrentStartToMixedTimeTracker { +private: + bool _active; + double _concurrent_start_end_time; + double _mixed_start_time; + double _total_pause_time; + + double wall_time() const { + return _mixed_start_time - _concurrent_start_end_time; + } +public: + G1ConcurrentStartToMixedTimeTracker() { reset(); } + + // Record concurrent start pause end, starting the time tracking. + void record_concurrent_start_end(double end_time) { + assert(!_active, "Concurrent start out of order."); + _concurrent_start_end_time = end_time; + _active = true; + } + + // Record the first mixed gc pause start, ending the time tracking. + void record_mixed_gc_start(double start_time) { + if (_active) { + _mixed_start_time = start_time; + _active = false; + } + } + + double last_marking_time() { + assert(has_result(), "Do not have all measurements yet."); + double result = (_mixed_start_time - _concurrent_start_end_time) - _total_pause_time; + reset(); + return result; + } + + void reset() { + _active = false; + _total_pause_time = 0.0; + _concurrent_start_end_time = -1.0; + _mixed_start_time = -1.0; + } + + void add_pause(double time) { + if (_active) { + _total_pause_time += time; + } + } + + // Returns whether we have a result that can be retrieved. + bool has_result() const { return _mixed_start_time > 0.0 && _concurrent_start_end_time > 0.0; } +}; + +#endif // SHARE_GC_G1_G1CONCURRENTSTARTTOMIXEDTIMETRACKER_HPP
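The renamed tracker above is driven entirely from G1Policy::record_pause(): record_concurrent_start_end() at the end of a concurrent start pause, add_pause() for the young/Remark/Cleanup pauses that follow, and record_mixed_gc_start() at the first mixed pause, after which last_marking_time() yields the mutator time in between for the adaptive IHOP prediction. Below is a minimal, illustrative sketch of that call sequence; it uses a trimmed-down stand-in for the class and made-up timestamps rather than the HotSpot header (which depends on VM-internal includes), so all names and values here are assumptions for illustration only.

#include <cassert>
#include <cstdio>

// Simplified stand-in mirroring the logic of G1ConcurrentStartToMixedTimeTracker above.
class ConcurrentStartToMixedTimeTracker {
  bool   _active;
  double _concurrent_start_end_time;
  double _mixed_start_time;
  double _total_pause_time;
public:
  ConcurrentStartToMixedTimeTracker() { reset(); }
  void reset() {
    _active = false;
    _total_pause_time = 0.0;
    _concurrent_start_end_time = -1.0;
    _mixed_start_time = -1.0;
  }
  void record_concurrent_start_end(double t) { _concurrent_start_end_time = t; _active = true; }
  void add_pause(double d)                   { if (_active) _total_pause_time += d; }
  void record_mixed_gc_start(double t)       { if (_active) { _mixed_start_time = t; _active = false; } }
  bool has_result() const { return _mixed_start_time > 0.0 && _concurrent_start_end_time > 0.0; }
  double last_marking_time() {
    assert(has_result());
    double result = (_mixed_start_time - _concurrent_start_end_time) - _total_pause_time;
    reset();
    return result;
  }
};

int main() {
  ConcurrentStartToMixedTimeTracker tracker;
  tracker.record_concurrent_start_end(10.0); // end of the concurrent start pause (seconds)
  tracker.add_pause(0.05);                   // a young pause while marking runs
  tracker.add_pause(0.02);                   // the Remark pause
  tracker.record_mixed_gc_start(12.0);       // first mixed pause begins
  // 2.0 s of wall time minus 0.07 s of STW pauses = 1.93 s of mutator time.
  printf("last_marking_time = %.2f s\n", tracker.last_marking_time());
  return 0;
}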