--- old/src/hotspot/share/gc/g1/g1Analytics.cpp	2019-08-23 19:41:51.271172895 -0400
+++ new/src/hotspot/share/gc/g1/g1Analytics.cpp	2019-08-23 19:41:51.071162143 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
 };

-static double cost_per_log_buffer_entry_ms_defaults[] = {
+static double cost_per_logged_card_ms_defaults[] = {
   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
 };
@@ -77,7 +77,7 @@
     _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _prev_collection_pause_end_ms(0.0),
     _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
-    _cost_per_log_buffer_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+    _cost_per_logged_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
     _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
     _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
     _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
@@ -101,7 +101,7 @@
   int index = MIN2(ParallelGCThreads - 1, 7u);

   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
-  _cost_per_log_buffer_entry_ms_seq->add(cost_per_log_buffer_entry_ms_defaults[index]);
+  _cost_per_logged_card_ms_seq->add(cost_per_logged_card_ms_defaults[index]);
   _cost_scan_hcc_seq->add(0.0);
   _young_cards_per_entry_ratio_seq->add(young_cards_per_entry_ratio_defaults[index]);
   _young_only_cost_per_remset_card_ms_seq->add(young_only_cost_per_remset_card_ms_defaults[index]);
@@ -158,8 +158,8 @@
     (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
 }

-void G1Analytics::report_cost_per_log_buffer_entry_ms(double cost_per_log_buffer_entry_ms) {
-  _cost_per_log_buffer_entry_ms_seq->add(cost_per_log_buffer_entry_ms);
+void G1Analytics::report_cost_per_logged_card_ms(double cost_per_logged_card_ms) {
+  _cost_per_logged_card_ms_seq->add(cost_per_logged_card_ms);
 }

 void G1Analytics::report_cost_scan_hcc(double cost_scan_hcc) {
@@ -222,8 +222,8 @@
   return get_new_prediction(_alloc_rate_ms_seq);
 }

-double G1Analytics::predict_cost_per_log_buffer_entry_ms() const {
-  return get_new_prediction(_cost_per_log_buffer_entry_ms_seq);
+double G1Analytics::predict_cost_per_logged_card_ms() const {
+  return get_new_prediction(_cost_per_logged_card_ms_seq);
 }

 double G1Analytics::predict_scan_hcc_ms() const {
@@ -231,7 +231,7 @@
 }

 double G1Analytics::predict_rs_update_time_ms(size_t pending_cards) const {
-  return pending_cards * predict_cost_per_log_buffer_entry_ms() + predict_scan_hcc_ms();
+  return pending_cards * predict_cost_per_logged_card_ms() + predict_scan_hcc_ms();
 }

 double G1Analytics::predict_young_cards_per_entry_ratio() const {
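Reviewer note: the renamed predictor keeps the same shape as before. The pause-time contribution of the logged cards is predicted as pending cards times a smoothed per-card cost, plus the predicted hot-card-cache scan time. A minimal standalone sketch of that arithmetic, assuming a plain mean-based predictor (the real TruncatedSeq also weights recent samples and pads by deviation, so this is illustrative only):

#include <cstddef>
#include <deque>
#include <numeric>

// Simplified stand-in for TruncatedSeq: keep the last N samples and
// predict with their mean.
class SampleSeq {
  std::deque<double> _samples;
  static const size_t TruncatedSeqLength = 10;
public:
  void add(double v) {
    _samples.push_back(v);
    if (_samples.size() > TruncatedSeqLength) _samples.pop_front();
  }
  double predict() const {
    if (_samples.empty()) return 0.0;
    return std::accumulate(_samples.begin(), _samples.end(), 0.0) / _samples.size();
  }
};

// Mirrors the shape of G1Analytics::predict_rs_update_time_ms(): the
// predicted cost of processing the pending logged cards, plus the
// predicted hot-card-cache scan time.
double predict_rs_update_time_ms(size_t pending_cards,
                                 const SampleSeq& cost_per_logged_card_ms,
                                 const SampleSeq& scan_hcc_ms) {
  return pending_cards * cost_per_logged_card_ms.predict() + scan_hcc_ms.predict();
}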
--- old/src/hotspot/share/gc/g1/g1Analytics.hpp	2019-08-23 19:41:52.191222355 -0400
+++ new/src/hotspot/share/gc/g1/g1Analytics.hpp	2019-08-23 19:41:51.983211173 -0400
@@ -46,7 +46,7 @@
   double _prev_collection_pause_end_ms;

   TruncatedSeq* _rs_length_diff_seq;
-  TruncatedSeq* _cost_per_log_buffer_entry_ms_seq;
+  TruncatedSeq* _cost_per_logged_card_ms_seq;
   TruncatedSeq* _cost_scan_hcc_seq;
   TruncatedSeq* _young_cards_per_entry_ratio_seq;
   TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
@@ -99,7 +99,7 @@
   void report_concurrent_mark_remark_times_ms(double ms);
   void report_concurrent_mark_cleanup_times_ms(double ms);
   void report_alloc_rate_ms(double alloc_rate);
-  void report_cost_per_log_buffer_entry_ms(double cost_per_log_buffer_entry_ms);
+  void report_cost_per_logged_card_ms(double cost_per_logged_card_ms);
   void report_cost_scan_hcc(double cost_scan_hcc);
   void report_cost_per_remset_card_ms(double cost_per_remset_card_ms, bool for_young_gc);
   void report_cards_per_entry_ratio(double cards_per_entry_ratio, bool for_young_gc);
@@ -116,7 +116,7 @@
   double predict_alloc_rate_ms() const;
   int num_alloc_rate_ms() const;

-  double predict_cost_per_log_buffer_entry_ms() const;
+  double predict_cost_per_logged_card_ms() const;

   double predict_scan_hcc_ms() const;
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2019-08-23 19:41:53.111271814 -0400
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2019-08-23 19:41:52.899260417 -0400
@@ -1080,7 +1080,7 @@
   // Discard all remembered set updates.
   G1BarrierSet::dirty_card_queue_set().abandon_logs();
-  assert(G1BarrierSet::dirty_card_queue_set().num_completed_buffers() == 0,
+  assert(G1BarrierSet::dirty_card_queue_set().num_cards() == 0,
          "DCQS should be empty");
 }
@@ -1683,7 +1683,7 @@
                                                G1SATBProcessCompletedThreshold,
                                                G1SATBBufferEnqueueingThresholdPercent);

-  // process_completed_buffers_threshold and max_completed_buffers are updated
+  // process_cards_threshold and max_cards are updated
   // later, based on the concurrent refinement object.
   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                   &bs->dirty_card_queue_buffer_allocator(),
@@ -1812,8 +1812,8 @@
   {
     G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
-    dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
+    dcqs.set_process_cards_threshold(concurrent_refine()->yellow_zone());
+    dcqs.set_max_cards(concurrent_refine()->red_zone());
   }

   // Here we allocate the dummy HeapRegion that is required by the
@@ -1952,7 +1952,7 @@
     while (dcqs.apply_closure_during_gc(cl, worker_i)) {
       n_completed_buffers++;
     }
-    assert(dcqs.num_completed_buffers() == 0, "Completed buffers exist!");
+    assert(dcqs.num_cards() == 0, "Completed buffers exist!");
     phase_times()->record_thread_work_item(G1GCPhaseTimes::MergeLB, worker_i, n_completed_buffers, G1GCPhaseTimes::MergeLBProcessedBuffers);
   }
@@ -2614,9 +2614,9 @@
   Threads::threads_do(&count_from_threads);

   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-  dcqs.verify_num_entries_in_completed_buffers();
+  dcqs.verify_num_cards();

-  return dcqs.num_entries_in_completed_buffers() + count_from_threads._cards;
+  return dcqs.num_cards() + count_from_threads._cards;
 }

 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
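Reviewer note: after this change both thresholds handed to the dirty card queue set are card counts, not buffer counts. A hedged sketch of the wiring above, with stand-in types (QueueSet and Refinement here are illustrative, not the HotSpot classes):

#include <cstddef>

struct QueueSet {
  size_t process_cards_threshold;
  size_t max_cards;
};

struct Refinement {
  size_t green_zone;   // below this: leave cards cached, do nothing
  size_t yellow_zone;  // in [green, yellow): refinement threads ramp up
  size_t red_zone;     // at/above this: mutators must help refine
};

// Analogous to the G1CollectedHeap initialization hunk above: start
// notifying refinement threads at the yellow zone, and cap the queue
// (mutator help kicks in) at the red zone.
void wire_zones(QueueSet& dcqs, const Refinement& cr) {
  dcqs.process_cards_threshold = cr.yellow_zone;
  dcqs.max_cards = cr.red_zone;
}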
--- old/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	2019-08-23 19:41:54.119326005 -0400
+++ new/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	2019-08-23 19:41:53.907314608 -0400
@@ -145,7 +145,7 @@
 STATIC_ASSERT(sizeof(LP64_ONLY(jint) NOT_LP64(jshort)) <= (sizeof(size_t)/2));
 const size_t max_yellow_zone = LP64_ONLY(max_jint) NOT_LP64(max_jshort);
 const size_t max_green_zone = max_yellow_zone / 2;
-const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_buffers.
+const size_t max_red_zone = INT_MAX; // For dcqs.set_max_cards.
 STATIC_ASSERT(max_yellow_zone <= max_red_zone);

 // Range check assertions for green zone values.
@@ -232,8 +232,12 @@
   return _thread_control.initialize(this, max_num_threads());
 }

+static size_t buffers_to_cards(size_t value) {
+  return value * G1UpdateBufferSize;
+}
+
 static size_t calc_min_yellow_zone_size() {
-  size_t step = G1ConcRefinementThresholdStep;
+  size_t step = buffers_to_cards(G1ConcRefinementThresholdStep);
   uint n_workers = G1ConcurrentRefine::max_num_threads();
   if ((max_yellow_zone / step) < n_workers) {
     return max_yellow_zone;
@@ -247,11 +251,12 @@
   if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
     green = ParallelGCThreads;
   }
+  green = buffers_to_cards(green);
   return MIN2(green, max_green_zone);
 }

 static size_t calc_init_yellow_zone(size_t green, size_t min_size) {
-  size_t config = G1ConcRefinementYellowZone;
+  size_t config = buffers_to_cards(G1ConcRefinementYellowZone);
   size_t size = 0;
   if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
     size = green * 2;
@@ -266,7 +271,7 @@
 static size_t calc_init_red_zone(size_t green, size_t yellow) {
   size_t size = yellow - green;
   if (!FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
-    size_t config = G1ConcRefinementRedZone;
+    size_t config = buffers_to_cards(G1ConcRefinementRedZone);
     if (yellow < config) {
       size = MAX2(size, config - yellow);
     }
@@ -322,18 +327,18 @@
 }

 static size_t calc_new_green_zone(size_t green,
-                                  double log_buffer_scan_time,
-                                  size_t processed_log_buffers,
+                                  double logged_cards_scan_time,
+                                  size_t processed_logged_cards,
                                   double goal_ms) {
   // Adjust green zone based on whether we're meeting the time goal.
   // Limit to max_green_zone.
   const double inc_k = 1.1, dec_k = 0.9;
-  if (log_buffer_scan_time > goal_ms) {
+  if (logged_cards_scan_time > goal_ms) {
     if (green > 0) {
       green = static_cast<size_t>(green * dec_k);
     }
-  } else if (log_buffer_scan_time < goal_ms &&
-             processed_log_buffers > green) {
+  } else if (logged_cards_scan_time < goal_ms &&
+             processed_logged_cards > green) {
     green = static_cast<size_t>(MAX2(green * inc_k, green + 1.0));
     green = MIN2(green, max_green_zone);
   }
@@ -350,20 +355,20 @@
   return MIN2(yellow + (yellow - green), max_red_zone);
 }

-void G1ConcurrentRefine::update_zones(double log_buffer_scan_time,
-                                      size_t processed_log_buffers,
+void G1ConcurrentRefine::update_zones(double logged_cards_scan_time,
+                                      size_t processed_logged_cards,
                                       double goal_ms) {
   log_trace( CTRL_TAGS )("Updating Refinement Zones: "
-                         "log buffer scan time: %.3fms, "
-                         "processed buffers: " SIZE_FORMAT ", "
+                         "logged cards scan time: %.3fms, "
+                         "processed cards: " SIZE_FORMAT ", "
                          "goal time: %.3fms",
-                         log_buffer_scan_time,
-                         processed_log_buffers,
+                         logged_cards_scan_time,
+                         processed_logged_cards,
                          goal_ms);

   _green_zone = calc_new_green_zone(_green_zone,
-                                    log_buffer_scan_time,
-                                    processed_log_buffers,
+                                    logged_cards_scan_time,
+                                    processed_logged_cards,
                                     goal_ms);
   _yellow_zone = calc_new_yellow_zone(_green_zone, _min_yellow_zone_size);
   _red_zone = calc_new_red_zone(_green_zone, _yellow_zone);
@@ -376,33 +381,33 @@
             _green_zone, _yellow_zone, _red_zone);
 }

-void G1ConcurrentRefine::adjust(double log_buffer_scan_time,
-                                size_t processed_log_buffers,
+void G1ConcurrentRefine::adjust(double logged_cards_scan_time,
+                                size_t processed_logged_cards,
                                 double goal_ms) {
   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();

   if (G1UseAdaptiveConcRefinement) {
-    update_zones(log_buffer_scan_time, processed_log_buffers, goal_ms);
+    update_zones(logged_cards_scan_time, processed_logged_cards, goal_ms);

     // Change the barrier params
     if (max_num_threads() == 0) {
       // Disable dcqs notification when there are no threads to notify.
-      dcqs.set_process_completed_buffers_threshold(G1DirtyCardQueueSet::ProcessCompletedBuffersThresholdNever);
+      dcqs.set_process_cards_threshold(G1DirtyCardQueueSet::ProcessCardsThresholdNever);
     } else {
       // Worker 0 is the primary; wakeup is via dcqs notification.
       STATIC_ASSERT(max_yellow_zone <= INT_MAX);
       size_t activate = activation_threshold(0);
-      dcqs.set_process_completed_buffers_threshold(activate);
+      dcqs.set_process_cards_threshold(activate);
     }
-    dcqs.set_max_completed_buffers(red_zone());
+    dcqs.set_max_cards(red_zone());
   }

-  size_t curr_queue_size = dcqs.num_completed_buffers();
-  if ((dcqs.max_completed_buffers() > 0) &&
+  size_t curr_queue_size = dcqs.num_cards();
+  if ((dcqs.max_cards() > 0) &&
      (curr_queue_size >= yellow_zone())) {
-    dcqs.set_completed_buffers_padding(curr_queue_size);
+    dcqs.set_max_cards_padding(curr_queue_size);
   } else {
-    dcqs.set_completed_buffers_padding(0);
+    dcqs.set_max_cards_padding(0);
   }
   dcqs.notify_if_necessary();
 }
@@ -430,16 +435,16 @@

 bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
   G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();

-  size_t curr_buffer_num = dcqs.num_completed_buffers();
-  // If the number of the buffers falls down into the yellow zone,
+  size_t curr_cards = dcqs.num_cards();
+  // If the number of the cards falls down into the yellow zone,
   // that means that the transition period after the evacuation pause has ended.
   // Since the value written to the DCQS is the same for all threads, there is no
   // need to synchronize.
-  if (dcqs.completed_buffers_padding() > 0 && curr_buffer_num <= yellow_zone()) {
-    dcqs.set_completed_buffers_padding(0);
+  if (dcqs.max_cards_padding() > 0 && curr_cards <= yellow_zone()) {
+    dcqs.set_max_cards_padding(0);
   }

-  maybe_activate_more_threads(worker_id, curr_buffer_num);
+  maybe_activate_more_threads(worker_id, curr_cards);

   // Process the next buffer, if there are enough left.
   return dcqs.refine_completed_buffer_concurrently(worker_id + worker_id_offset(),
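Reviewer note: the command-line zone flags (G1ConcRefinementGreenZone and friends) remain buffer-based; the new buffers_to_cards helper rescales them by G1UpdateBufferSize so all internal zone bookkeeping is in cards. A self-contained model of the adaptive green-zone step, mirroring the inc_k/dec_k logic in the hunk above (the buffer size and zone cap values are made-up stand-ins):

#include <algorithm>
#include <cstddef>

static const size_t g1_update_buffer_size = 256;  // stand-in for G1UpdateBufferSize
static const size_t max_green_zone = 1u << 30;    // stand-in bound

static size_t buffers_to_cards(size_t value) {
  return value * g1_update_buffer_size;  // flags count buffers, zones count cards
}

// Same shape as calc_new_green_zone(): shrink by ~10% when refinement
// overran its time goal; grow by ~10% when it met the goal and had more
// work than the current zone.
static size_t calc_new_green_zone(size_t green,
                                  double logged_cards_scan_time_ms,
                                  size_t processed_logged_cards,
                                  double goal_ms) {
  const double inc_k = 1.1, dec_k = 0.9;
  if (logged_cards_scan_time_ms > goal_ms) {
    if (green > 0) {
      green = static_cast<size_t>(green * dec_k);
    }
  } else if (logged_cards_scan_time_ms < goal_ms &&
             processed_logged_cards > green) {
    green = static_cast<size_t>(std::max(green * inc_k, green + 1.0));
    green = std::min(green, max_green_zone);
  }
  return green;
}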
--- old/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp	2019-08-23 19:41:55.039375464 -0400
+++ new/src/hotspot/share/gc/g1/g1ConcurrentRefine.hpp	2019-08-23 19:41:54.827364067 -0400
@@ -60,28 +60,29 @@
   void stop();
 };

-// Controls refinement threads and their activation based on the number of completed
-// buffers currently available in the global dirty card queue.
-// Refinement threads pick work from the queue based on these thresholds. They are activated
-// gradually based on the amount of work to do.
+// Controls refinement threads and their activation based on the number of
+// cards currently available in the global dirty card queue.
+// Refinement threads obtain work from the queue (a buffer at a time) based
+// on these thresholds. They are activated gradually based on the amount of
+// work to do.
 // Refinement thread n activates thread n+1 if the instance of this class determines there
 // is enough work available. Threads deactivate themselves if the current amount of
-// completed buffers falls below their individual threshold.
+// available cards falls below their individual threshold.
 class G1ConcurrentRefine : public CHeapObj<mtGC> {
   G1ConcurrentRefineThreadControl _thread_control;
   /*
   * The value of the completed dirty card queue length falls into one of 3 zones:
   * green, yellow, red. If the value is in [0, green) nothing is
-  * done, the buffers are left unprocessed to enable the caching effect of the
+  * done, the buffered cards are left unprocessed to enable the caching effect of the
   * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
   * threads are gradually activated. In [yellow, red) all threads are
   * running. If the length becomes red (max queue length) the mutators start
-  * processing the buffers.
+  * processing cards too.
   *
   * There are some interesting cases (when G1UseAdaptiveConcRefinement
   * is turned off):
   * 1) green = yellow = red = 0. In this case the mutator will process all
-  *    buffers. Except for those that are created by the deferred updates
+  *    cards. Except for those that are created by the deferred updates
   *    machinery during a collection.
   * 2) green = 0. Means no caching. Can be a good way to minimize the
   *    amount of time spent updating remembered sets during a collection.
@@ -97,12 +98,12 @@
                      size_t min_yellow_zone_size);

   // Update green/yellow/red zone values based on how well goals are being met.
-  void update_zones(double log_buffer_scan_time,
-                    size_t processed_log_buffers,
+  void update_zones(double logged_cards_scan_time,
+                    size_t processed_logged_cards,
                     double goal_ms);

   static uint worker_id_offset();
-  void maybe_activate_more_threads(uint worker_id, size_t num_cur_buffers);
+  void maybe_activate_more_threads(uint worker_id, size_t num_cur_cards);

   jint initialize();
 public:
@@ -115,8 +116,9 @@
   void stop();

   // Adjust refinement thresholds based on work done during the pause and the goal time.
-  void adjust(double log_buffer_scan_time, size_t processed_log_buffers, double goal_ms);
+  void adjust(double logged_cards_scan_time, size_t processed_logged_cards, double goal_ms);

+  // Cards in the dirty card queue set.
   size_t activation_threshold(uint worker_id) const;
   size_t deactivation_threshold(uint worker_id) const;
   // Perform a single refinement step. Called by the refinement threads when woken up.
@@ -130,6 +132,7 @@

   void print_threads_on(outputStream* st) const;

+  // Cards in the dirty card queue set.
   size_t green_zone() const { return _green_zone; }
   size_t yellow_zone() const { return _yellow_zone; }
   size_t red_zone() const { return _red_zone; }
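Reviewer note: the class comment above says refinement threads are "gradually activated" across the yellow zone. Purely as an illustration, here is a toy policy that spaces per-worker activation thresholds evenly over [green, yellow); the real per-worker thresholds are computed in g1ConcurrentRefine.cpp and are not necessarily evenly spaced, and each worker also has a lower deactivation threshold:

#include <cstddef>

// Hypothetical even spacing of activation thresholds across the yellow
// zone: worker 0 wakes first, the last worker wakes just below yellow.
size_t activation_threshold(size_t green, size_t yellow,
                            unsigned worker_id, unsigned num_workers) {
  size_t step = (yellow - green) / (num_workers + 1);
  return green + step * (worker_id + 1);
}

bool should_activate(size_t num_cards, size_t green, size_t yellow,
                     unsigned worker_id, unsigned num_workers) {
  return num_cards >= activation_threshold(green, yellow, worker_id, num_workers);
}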
--- old/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	2019-08-23 19:41:55.947424279 -0400
+++ new/src/hotspot/share/gc/g1/g1ConcurrentRefineThread.cpp	2019-08-23 19:41:55.731412666 -0400
@@ -104,7 +104,7 @@
     size_t buffers_processed = 0;
     log_debug(gc, refine)("Activated worker %d, on threshold: " SIZE_FORMAT ", current: " SIZE_FORMAT,
                           _worker_id, _cr->activation_threshold(_worker_id),
-                          G1BarrierSet::dirty_card_queue_set().num_completed_buffers());
+                          G1BarrierSet::dirty_card_queue_set().num_cards());

     {
       SuspendibleThreadSetJoiner sts_join;
@@ -124,9 +124,10 @@
     deactivate();
     log_debug(gc, refine)("Deactivated worker %d, off threshold: " SIZE_FORMAT
-                          ", current: " SIZE_FORMAT ", processed: " SIZE_FORMAT,
+                          ", current: " SIZE_FORMAT ", buffers processed: "
+                          SIZE_FORMAT,
                           _worker_id, _cr->deactivation_threshold(_worker_id),
-                          G1BarrierSet::dirty_card_queue_set().num_completed_buffers(),
+                          G1BarrierSet::dirty_card_queue_set().num_cards(),
                           buffers_processed);

     if (os::supports_vtime()) {
--- old/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	2019-08-23 19:41:56.835472018 -0400
+++ new/src/hotspot/share/gc/g1/g1DirtyCardQueue.cpp	2019-08-23 19:41:56.627460836 -0400
@@ -84,12 +84,12 @@
   _cbl_mon(NULL),
   _completed_buffers_head(NULL),
   _completed_buffers_tail(NULL),
-  _num_entries_in_completed_buffers(0),
-  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
+  _num_cards(0),
+  _process_cards_threshold(ProcessCardsThresholdNever),
   _process_completed_buffers(false),
   _notify_when_complete(notify_when_complete),
-  _max_completed_buffers(MaxCompletedBuffersUnlimited),
-  _completed_buffers_padding(0),
+  _max_cards(MaxCardsUnlimited),
+  _max_cards_padding(0),
   _free_ids(NULL),
   _processed_buffers_mut(0),
   _processed_buffers_rs_thread(0)
@@ -133,53 +133,53 @@
     _completed_buffers_tail->set_next(cbn);
     _completed_buffers_tail = cbn;
   }
-  _num_entries_in_completed_buffers += buffer_size() - cbn->index();
+  _num_cards += buffer_size() - cbn->index();

   if (!process_completed_buffers() &&
-      (num_completed_buffers() > process_completed_buffers_threshold())) {
+      (num_cards() > process_cards_threshold())) {
     set_process_completed_buffers(true);
     if (_notify_when_complete) {
       _cbl_mon->notify_all();
     }
   }
-  verify_num_entries_in_completed_buffers();
+  verify_num_cards();
 }

 BufferNode* G1DirtyCardQueueSet::get_completed_buffer(size_t stop_at) {
   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);

-  if (num_completed_buffers() <= stop_at) {
+  if (num_cards() <= stop_at) {
     return NULL;
   }

-  assert(num_completed_buffers() > 0, "invariant");
+  assert(num_cards() > 0, "invariant");
   assert(_completed_buffers_head != NULL, "invariant");
   assert(_completed_buffers_tail != NULL, "invariant");

   BufferNode* bn = _completed_buffers_head;
-  _num_entries_in_completed_buffers -= buffer_size() - bn->index();
+  _num_cards -= buffer_size() - bn->index();
   _completed_buffers_head = bn->next();
   if (_completed_buffers_head == NULL) {
-    assert(num_completed_buffers() == 0, "invariant");
+    assert(num_cards() == 0, "invariant");
     _completed_buffers_tail = NULL;
     set_process_completed_buffers(false);
   }
-  verify_num_entries_in_completed_buffers();
+  verify_num_cards();
   bn->set_next(NULL);
   return bn;
 }

 #ifdef ASSERT
-void G1DirtyCardQueueSet::verify_num_entries_in_completed_buffers() const {
+void G1DirtyCardQueueSet::verify_num_cards() const {
   size_t actual = 0;
   BufferNode* cur = _completed_buffers_head;
   while (cur != NULL) {
     actual += buffer_size() - cur->index();
     cur = cur->next();
   }
-  assert(actual == _num_entries_in_completed_buffers,
+  assert(actual == _num_cards,
          "Num entries in completed buffers should be " SIZE_FORMAT " but are " SIZE_FORMAT,
-         _num_entries_in_completed_buffers, actual);
+         _num_cards, actual);
 }
 #endif

@@ -190,7 +190,7 @@
     buffers_to_delete = _completed_buffers_head;
     _completed_buffers_head = NULL;
     _completed_buffers_tail = NULL;
-    _num_entries_in_completed_buffers = 0;
+    _num_cards = 0;
     set_process_completed_buffers(false);
   }
   while (buffers_to_delete != NULL) {
@@ -203,7 +203,7 @@
 void G1DirtyCardQueueSet::notify_if_necessary() {
   MutexLocker x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  if (num_completed_buffers() > process_completed_buffers_threshold()) {
+  if (num_cards() > process_cards_threshold()) {
     set_process_completed_buffers(true);
     if (_notify_when_complete)
       _cbl_mon->notify();
@@ -228,12 +228,12 @@
     _completed_buffers_tail->set_next(from._head);
     _completed_buffers_tail = from._tail;
   }
-  _num_entries_in_completed_buffers += from._entry_count;
+  _num_cards += from._entry_count;

   assert(_completed_buffers_head == NULL && _completed_buffers_tail == NULL ||
          _completed_buffers_head != NULL && _completed_buffers_tail != NULL,
          "Sanity");
-  verify_num_entries_in_completed_buffers();
+  verify_num_cards();
 }

 bool G1DirtyCardQueueSet::apply_closure_to_buffer(G1CardTableEntryClosure* cl,
@@ -277,9 +277,8 @@
     // thread do the processing itself.  We don't lock to access
     // buffer count or padding; it is fine to be imprecise here.  The
     // add of padding could overflow, which is treated as unlimited.
-    size_t max_buffers = max_completed_buffers();
-    size_t limit = max_buffers + completed_buffers_padding();
-    if ((num_completed_buffers() > limit) && (limit >= max_buffers)) {
+    size_t limit = max_cards() + max_cards_padding();
+    if ((num_cards() > limit) && (limit >= max_cards())) {
       if (mut_process_buffer(node)) {
         return true;
       }
@@ -358,8 +357,8 @@
   // the global list of logs.  Temporarily turn off the limit on the number
   // of outstanding buffers.
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
-  size_t old_limit = max_completed_buffers();
-  set_max_completed_buffers(MaxCompletedBuffersUnlimited);
+  size_t old_limit = max_cards();
+  set_max_cards(MaxCardsUnlimited);

   struct ConcatenateThreadLogClosure : public ThreadClosure {
     virtual void do_thread(Thread* t) {
@@ -372,5 +371,5 @@
   Threads::threads_do(&closure);

   G1BarrierSet::shared_dirty_card_queue().flush();
-  set_max_completed_buffers(old_limit);
+  set_max_cards(old_limit);
 }
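Reviewer note: the card accounting above hinges on one invariant that is easy to miss: a completed buffer node holds buffer_size() - index live entries, because slots below index have already been consumed. A small standalone restatement of the verify_num_cards() walk (Node is a stand-in for BufferNode):

#include <cstddef>

struct Node {
  Node* next;
  size_t index;  // first unconsumed slot; entries [index, buffer_size) are live
};

static const size_t buffer_size = 256;  // stand-in for PtrQueueSet::buffer_size()

// Mirrors verify_num_cards(): the cached _num_cards counter must equal the
// sum of live entries over the completed-buffer list.
size_t count_cards(const Node* head) {
  size_t actual = 0;
  for (const Node* cur = head; cur != nullptr; cur = cur->next) {
    actual += buffer_size - cur->index;
  }
  return actual;
}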
--- old/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	2019-08-23 19:41:57.775522552 -0400
+++ new/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	2019-08-23 19:41:57.563511155 -0400
@@ -66,14 +66,14 @@
 };

 class G1DirtyCardQueueSet: public PtrQueueSet {
-  Monitor* _cbl_mon;  // Protects the fields below.
+  Monitor* _cbl_mon;  // Protects the list and count members.
   BufferNode* _completed_buffers_head;
   BufferNode* _completed_buffers_tail;

-  // Number of actual entries in the list of completed buffers.
-  volatile size_t _num_entries_in_completed_buffers;
+  // Number of actual cards in the list of completed buffers.
+  volatile size_t _num_cards;

-  size_t _process_completed_buffers_threshold;
+  size_t _process_cards_threshold;
   volatile bool _process_completed_buffers;

   // If true, notify_all on _cbl_mon when the threshold is reached.
@@ -112,11 +112,11 @@

   bool mut_process_buffer(BufferNode* node);

-  // If the queue contains more buffers than configured here, the
-  // mutator must start doing some of the concurrent refinement work,
-  size_t _max_completed_buffers;
-  size_t _completed_buffers_padding;
-  static const size_t MaxCompletedBuffersUnlimited = SIZE_MAX;
+  // If the queue contains more cards than configured here, the
+  // mutator must start doing some of the concurrent refinement work.
+  size_t _max_cards;
+  size_t _max_cards_padding;
+  static const size_t MaxCardsUnlimited = SIZE_MAX;

   G1FreeIdSet* _free_ids;

@@ -150,31 +150,26 @@
   // return a completed buffer from the list.  Otherwise, return NULL.
   BufferNode* get_completed_buffer(size_t stop_at = 0);

-  // The number of buffers in the list.  Derived as an approximation from the number
-  // of entries in the buffers.  Racy.
-  size_t num_completed_buffers() const {
-    return (num_entries_in_completed_buffers() + buffer_size() - 1) / buffer_size();
-  }
-  // The number of entries in completed buffers.  Read without synchronization.
-  size_t num_entries_in_completed_buffers() const { return _num_entries_in_completed_buffers; }
+  // The number of cards in completed buffers.  Read without synchronization.
+  size_t num_cards() const { return _num_cards; }

-  // Verify that _num_entries_in_completed_buffers is equal to the sum of actual entries
+  // Verify that _num_cards is equal to the sum of actual cards
   // in the completed buffers.
-  void verify_num_entries_in_completed_buffers() const NOT_DEBUG_RETURN;
+  void verify_num_cards() const NOT_DEBUG_RETURN;

   bool process_completed_buffers() { return _process_completed_buffers; }
   void set_process_completed_buffers(bool x) { _process_completed_buffers = x; }

-  // Get/Set the number of completed buffers that triggers log processing.
-  // Log processing should be done when the number of buffers exceeds the
+  // Get/Set the number of cards that triggers log processing.
+  // Log processing should be done when the number of cards exceeds the
   // threshold.
-  void set_process_completed_buffers_threshold(size_t sz) {
-    _process_completed_buffers_threshold = sz;
+  void set_process_cards_threshold(size_t sz) {
+    _process_cards_threshold = sz;
   }
-  size_t process_completed_buffers_threshold() const {
-    return _process_completed_buffers_threshold;
+  size_t process_cards_threshold() const {
+    return _process_cards_threshold;
   }
-  static const size_t ProcessCompletedBuffersThresholdNever = SIZE_MAX;
+  static const size_t ProcessCardsThresholdNever = SIZE_MAX;

   // Notify the consumer if the number of buffers crossed the threshold
   void notify_if_necessary();
@@ -196,18 +191,18 @@
   // If any threads have partial logs, add them to the global list of logs.
   void concatenate_logs();

-  void set_max_completed_buffers(size_t m) {
-    _max_completed_buffers = m;
+  void set_max_cards(size_t m) {
+    _max_cards = m;
   }
-  size_t max_completed_buffers() const {
-    return _max_completed_buffers;
+  size_t max_cards() const {
+    return _max_cards;
   }

-  void set_completed_buffers_padding(size_t padding) {
-    _completed_buffers_padding = padding;
+  void set_max_cards_padding(size_t padding) {
+    _max_cards_padding = padding;
   }
-  size_t completed_buffers_padding() const {
-    return _completed_buffers_padding;
+  size_t max_cards_padding() const {
+    return _max_cards_padding;
   }

   jint processed_buffers_mut() {
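Reviewer note: the renamed max_cards()/max_cards_padding() pair keeps the overflow behavior called out in the .cpp comment: the padded limit may wrap around, and a wrapped limit must behave as unlimited. The guard limit >= max_cards filters the wrapped case. A compact demonstration of that test:

#include <cstddef>

// Mirrors the mutator throttling check in g1DirtyCardQueue.cpp: only ask
// the mutator to help refine when the card count exceeds the padded limit
// and the addition did not wrap (wrap means treat the limit as unlimited).
bool mutator_should_help(size_t num_cards, size_t max_cards, size_t padding) {
  size_t limit = max_cards + padding;  // may overflow intentionally
  return (num_cards > limit) && (limit >= max_cards);
}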
--- old/src/hotspot/share/gc/g1/g1Policy.cpp	2019-08-23 19:41:58.703572442 -0400
+++ new/src/hotspot/share/gc/g1/g1Policy.cpp	2019-08-23 19:41:58.491561045 -0400
@@ -572,16 +572,16 @@
   return result;
 }

-double G1Policy::log_buffer_processing_time() const {
+double G1Policy::logged_cards_processing_time() const {
   double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
-  size_t log_buffer_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
+  size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
   size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                  phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
   // This may happen if there are duplicate cards in different log buffers.
-  if (log_buffer_dirty_cards > scan_heap_roots_cards) {
+  if (logged_dirty_cards > scan_heap_roots_cards) {
     return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
   }
-  return (all_cards_processing_time * log_buffer_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
+  return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
 }

 // Anything below that is considered to be zero
@@ -662,11 +662,11 @@
   double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::MergeHCC) : 0.0;

   if (update_stats) {
-    double cost_per_log_buffer_entry = 0.0;
-    size_t const pending_log_buffer_entries = p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
-    if (pending_log_buffer_entries > 0) {
-      cost_per_log_buffer_entry = log_buffer_processing_time() / pending_log_buffer_entries;
-      _analytics->report_cost_per_log_buffer_entry_ms(cost_per_log_buffer_entry);
+    double cost_per_logged_card = 0.0;
+    size_t const pending_logged_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
+    if (pending_logged_cards > 0) {
+      cost_per_logged_card = logged_cards_processing_time() / pending_logged_cards;
+      _analytics->report_cost_per_logged_card_ms(cost_per_logged_card);
     }
     _analytics->report_cost_scan_hcc(scan_hcc_time_ms);

@@ -676,8 +676,8 @@
     // There might have been duplicate log buffer entries in the queues which could
     // increase this value beyond the cards scanned. In this case attribute all cards
     // to the log buffers.
-    if (pending_log_buffer_entries <= total_cards_scanned) {
-      remset_cards_scanned = total_cards_scanned - pending_log_buffer_entries;
+    if (pending_logged_cards <= total_cards_scanned) {
+      remset_cards_scanned = total_cards_scanned - pending_logged_cards;
     }

     double cost_per_remset_card_ms = 0.0;
@@ -786,26 +786,26 @@
   }

   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
-  double scan_log_buffer_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
+  double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

-  if (scan_log_buffer_time_goal_ms < scan_hcc_time_ms) {
+  if (scan_logged_cards_time_goal_ms < scan_hcc_time_ms) {
     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
-                                "Log Buffer Scan time goal: %1.2fms Scan HCC time: %1.2fms",
-                                scan_log_buffer_time_goal_ms, scan_hcc_time_ms);
+                                "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
+                                scan_logged_cards_time_goal_ms, scan_hcc_time_ms);

-    scan_log_buffer_time_goal_ms = 0;
+    scan_logged_cards_time_goal_ms = 0;
   } else {
-    scan_log_buffer_time_goal_ms -= scan_hcc_time_ms;
+    scan_logged_cards_time_goal_ms -= scan_hcc_time_ms;
   }

-  double const log_buffer_time = log_buffer_processing_time();
+  double const logged_cards_time = logged_cards_processing_time();

-  log_debug(gc, ergo, refine)("Concurrent refinement times: Log Buffer Scan time goal: %1.2fms Log Buffer Scan time: %1.2fms HCC time: %1.2fms",
-                              scan_log_buffer_time_goal_ms, log_buffer_time, scan_hcc_time_ms);
+  log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
+                              scan_logged_cards_time_goal_ms, logged_cards_time, scan_hcc_time_ms);

-  _g1h->concurrent_refine()->adjust(log_buffer_time,
-                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBProcessedBuffers),
-                                    scan_log_buffer_time_goal_ms);
+  _g1h->concurrent_refine()->adjust(logged_cards_time,
+                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
+                                    scan_logged_cards_time_goal_ms);
 }

 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
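Reviewer note: logged_cards_processing_time() attributes scan time to the logged cards by card ratio, then always adds the full MergeLB time; duplicate cards in different log buffers can push the logged count above the scanned count, in which case all scan time is attributed. Restated as a standalone function (the zero-cards guard is added here only to keep the sketch safe in isolation):

#include <cstddef>

// Same shape as G1Policy::logged_cards_processing_time(): scale the
// heap-root scan time by the fraction of scanned cards that came from the
// log buffers, then charge the full merge time. All times in milliseconds.
double logged_cards_processing_time(double scan_time_ms,
                                    double merge_time_ms,
                                    size_t logged_dirty_cards,
                                    size_t scan_heap_roots_cards) {
  // Duplicate cards across log buffers can make the logged count exceed
  // the scanned count; attribute all scan time to the logs in that case.
  if (logged_dirty_cards > scan_heap_roots_cards) {
    return scan_time_ms + merge_time_ms;
  }
  if (scan_heap_roots_cards == 0) {  // guard added for this standalone sketch
    return merge_time_ms;
  }
  return (scan_time_ms * logged_dirty_cards / scan_heap_roots_cards) + merge_time_ms;
}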
--- old/src/hotspot/share/gc/g1/g1Policy.hpp	2019-08-23 19:41:59.659623837 -0400
+++ new/src/hotspot/share/gc/g1/g1Policy.hpp	2019-08-23 19:41:59.451612654 -0400
@@ -112,7 +112,7 @@
     return collector_state()->in_young_only_phase() && !collector_state()->mark_or_rebuild_in_progress();
   }

-  double log_buffer_processing_time() const;
+  double logged_cards_processing_time() const;

 public:
   const G1Predictions& predictor() const { return _predictor; }
   const G1Analytics* analytics() const { return const_cast<const G1Analytics*>(_analytics); }