# HG changeset patch
# User brutisso
# Date 1424347013 -3600
#      Thu Feb 19 12:56:53 2015 +0100
# Node ID 8ad354e769619d93ec41b748fc4bf97f59b4a49c
# Parent  759a167d23811f9cec9e71e9b144ae675809bc18
[mq]: formatstring

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -81,6 +81,22 @@
   }
 };
 
+template <>
+void WorkerDataArray<double>::log_value(LineBuffer& buf, double value) {
+  buf.append("%.1lf", value);
+}
+
+template <>
+void WorkerDataArray<int>::log_value(LineBuffer& buf, int value) {
+  buf.append("%d", value);
+}
+
+template <>
+void WorkerDataArray<size_t>::log_value(LineBuffer& buf, size_t value) {
+  buf.append(SIZE_FORMAT, value);
+}
+
+
 PRAGMA_DIAG_PUSH
 PRAGMA_FORMAT_NONLITERAL_IGNORED
 template <class T>
@@ -89,7 +105,7 @@
     // No need for min, max, average and sum for only one worker
     LineBuffer buf(level);
     buf.append("[%s: ", title);
-    buf.append(_print_format, _data[0]);
+    log_value(buf, _data[0]);
     buf.append_and_print_cr("]");
     return;
   }
@@ -107,7 +123,7 @@
     sum += val;
     if (G1Log::finest()) {
       buf.append(" ");
-      buf.append(_print_format, val);
+      log_value(buf, val);
     }
   }
 
@@ -117,18 +133,18 @@
   double avg = (double)sum / (double)_length;
   buf.append(" Min: ");
-  buf.append(_print_format, min);
+  log_value(buf, min);
   buf.append(", Avg: ");
   buf.append("%.1lf", avg); // Always print average as a double
   buf.append(", Max: ");
-  buf.append(_print_format, max);
+  log_value(buf, max);
   buf.append(", Diff: ");
-  buf.append(_print_format, max - min);
+  log_value(buf, max - min);
   if (_print_sum) {
     // for things like the start and end times the sum is not
     // that relevant
     buf.append(", Sum: ");
-    buf.append(_print_format, sum);
+    log_value(buf, sum);
   }
   buf.append_and_print_cr("]");
 }
@@ -160,23 +176,23 @@
 
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads),
-  _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
-  _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
-  _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
-  _last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
-  _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
-  _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
-  _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),
-  _last_obj_copy_times_ms(_max_gc_threads, "%.1lf"),
-  _last_termination_times_ms(_max_gc_threads, "%.1lf"),
-  _last_termination_attempts(_max_gc_threads, SIZE_FORMAT),
-  _last_gc_worker_end_times_ms(_max_gc_threads, "%.1lf", false),
-  _last_gc_worker_times_ms(_max_gc_threads, "%.1lf"),
-  _last_gc_worker_other_times_ms(_max_gc_threads, "%.1lf"),
-  _last_redirty_logged_cards_time_ms(_max_gc_threads, "%.1lf"),
-  _last_redirty_logged_cards_processed_cards(_max_gc_threads, SIZE_FORMAT),
-  _cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"),
-  _cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf")
+  _last_gc_worker_start_times_ms(_max_gc_threads, false),
+  _last_ext_root_scan_times_ms(_max_gc_threads),
+  _last_satb_filtering_times_ms(_max_gc_threads),
+  _last_update_rs_times_ms(_max_gc_threads),
+  _last_update_rs_processed_buffers(_max_gc_threads),
+  _last_scan_rs_times_ms(_max_gc_threads),
+  _last_strong_code_root_scan_times_ms(_max_gc_threads),
+  _last_obj_copy_times_ms(_max_gc_threads),
+  _last_termination_times_ms(_max_gc_threads),
+  _last_termination_attempts(_max_gc_threads),
+  _last_gc_worker_end_times_ms(_max_gc_threads, false),
+  _last_gc_worker_times_ms(_max_gc_threads),
+  _last_gc_worker_other_times_ms(_max_gc_threads),
+  _last_redirty_logged_cards_time_ms(_max_gc_threads),
+  _last_redirty_logged_cards_processed_cards(_max_gc_threads),
+  _cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads),
+  _cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads)
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
 }

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -28,11 +28,12 @@
 #include "memory/allocation.hpp"
 #include "gc_interface/gcCause.hpp"
 
+class LineBuffer;
+
 template <class T>
 class WorkerDataArray : public CHeapObj<mtGC> {
   T* _data;
   uint _length;
-  const char* _print_format;
   bool _print_sum;
 
   NOT_PRODUCT(static const T _uninitialized;)
@@ -45,9 +46,11 @@
   T _sum;
   double _average;
 
+  void log_value(LineBuffer& buf, T value);
+
  public:
-  WorkerDataArray(uint length, const char* print_format, bool print_sum = true) :
-    _length(length), _print_format(print_format), _print_sum(print_sum), _has_new_data(true) {
+  WorkerDataArray(uint length, bool print_sum = true) :
+    _length(length), _print_sum(print_sum), _has_new_data(true) {
     assert(length > 0, "Must have some workers to store data for");
     _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
   }

# HG changeset patch
# User brutisso
# Date 1424425514 -3600
#      Fri Feb 20 10:45:14 2015 +0100
# Node ID 4287849418de18f23c64add89a6251c0e6795256
# Parent  8ad354e769619d93ec41b748fc4bf97f59b4a49c
[mq]: firststeps

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2206,7 +2206,7 @@
   hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
 
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  int n_completed_buffers = 0;
+  size_t n_completed_buffers = 0;
   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
     n_completed_buffers++;
   }

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
+#include "memory/allocation.hpp"
 #include "runtime/atomic.inline.hpp"
 
 // Helper class for avoiding interleaved logging
@@ -81,30 +82,72 @@
   }
 };
 
+template <class T>
+WorkerDataArray<T>::WorkerDataArray(uint length) :
+  _title(NULL), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL) {
+  assert(length > 0, "Must have some workers to store data for");
+  _length = length;
+  _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
+}
+
+template <class T>
+WorkerDataArray<T>::WorkerDataArray(uint length, bool print_sum) :
+  _title(NULL), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL) {
+  assert(length > 0, "Must have some workers to store data for");
+  _length = length;
+  _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
+}
+
+template <class T>
+WorkerDataArray<T>::WorkerDataArray(uint length, const char* title) :
+  _title(title), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL) {
+  assert(length > 0, "Must have some workers to store data for");
+  _length = length;
+  _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
+}
+
+template <class T>
+WorkerDataArray<T>::WorkerDataArray(uint length, const char* title, bool print_sum) :
+  _title(title), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL) {
+  assert(length > 0, "Must have some workers to store data for");
+  _length = length;
+  _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
+}
+
+template <class T>
+WorkerDataArray<T>::~WorkerDataArray() {
+  FREE_C_HEAP_ARRAY(T, _data);
+}
+
 template <>
 void WorkerDataArray<double>::log_value(LineBuffer& buf, double value) {
   buf.append("%.1lf", value);
 }
 
 template <>
-void WorkerDataArray<int>::log_value(LineBuffer& buf, int value) {
-  buf.append("%d", value);
-}
-
-template <>
 void WorkerDataArray<size_t>::log_value(LineBuffer& buf, size_t value) {
   buf.append(SIZE_FORMAT, value);
 }
 
+template <>
+size_t WorkerDataArray<size_t>::uninitialized() {
+  return (size_t)-5;
+}
+
+template <>
+double WorkerDataArray<double>::uninitialized() {
+  return -3.0;
+}
 
 PRAGMA_DIAG_PUSH
 PRAGMA_FORMAT_NONLITERAL_IGNORED
 template <class T>
 void WorkerDataArray<T>::print(int level, const char* title) {
+  const char* t_title = _title == NULL ? title : _title;
   if (_length == 1) {
     // No need for min, max, average and sum for only one worker
     LineBuffer buf(level);
-    buf.append("[%s: ", title);
+    buf.append("[%s: ", t_title);
     log_value(buf, _data[0]);
     buf.append_and_print_cr("]");
     return;
@@ -115,7 +158,7 @@
   T sum = 0;
 
   LineBuffer buf(level);
-  buf.append("[%s:", title);
+  buf.append("[%s:", t_title);
   for (uint i = 0; i < _length; ++i) {
     T val = _data[i];
     min = MIN2(val, min);
@@ -147,61 +190,85 @@
     log_value(buf, sum);
   }
   buf.append_and_print_cr("]");
+
+  if (_sub_count != NULL) {
+    _sub_count->print(level + 1, "Dummy2");
+  }
 }
 PRAGMA_DIAG_POP
 
 #ifndef PRODUCT
 
-template <> const int WorkerDataArray<int>::_uninitialized = -1;
-template <> const double WorkerDataArray<double>::_uninitialized = -1.0;
-template <> const size_t WorkerDataArray<size_t>::_uninitialized = (size_t)-1;
+//template <> const int WorkerDataArray<int>::_uninitialized = -1;
+//template <> const double WorkerDataArray<double>::_uninitialized = -1.0;
+//template <> const size_t WorkerDataArray<size_t>::_uninitialized = (size_t)-1;
 
 template <class T>
 void WorkerDataArray<T>::reset() {
   for (uint i = 0; i < _length; i++) {
-    _data[i] = (T)_uninitialized;
+    _data[i] = WorkerDataArray<T>::uninitialized();
+  }
+  if (_sub_count != NULL) {
+    _sub_count->reset();
   }
 }
 
 template <class T>
 void WorkerDataArray<T>::verify() {
   for (uint i = 0; i < _length; i++) {
-    assert(_data[i] != _uninitialized,
+    assert(_data[i] != WorkerDataArray<T>::uninitialized(),
         err_msg("Invalid data for worker %u, data: %lf, uninitialized: %lf",
-        i, (double)_data[i], (double)_uninitialized));
+        i, (double)_data[i], (double)WorkerDataArray<T>::uninitialized()));
+  }
+  if (_sub_count != NULL) {
+    _sub_count->verify();
   }
 }
 
 #endif
 
+template class WorkerDataArray<double>;
+template class WorkerDataArray<size_t>;
+
 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
   _max_gc_threads(max_gc_threads),
-  _last_gc_worker_start_times_ms(_max_gc_threads, false),
-  _last_ext_root_scan_times_ms(_max_gc_threads),
-  _last_satb_filtering_times_ms(_max_gc_threads),
-  _last_update_rs_times_ms(_max_gc_threads),
-  _last_update_rs_processed_buffers(_max_gc_threads),
-  _last_scan_rs_times_ms(_max_gc_threads),
-  _last_strong_code_root_scan_times_ms(_max_gc_threads),
-  _last_obj_copy_times_ms(_max_gc_threads),
-  _last_termination_times_ms(_max_gc_threads),
-  _last_termination_attempts(_max_gc_threads),
-  _last_gc_worker_end_times_ms(_max_gc_threads, false),
-  _last_gc_worker_times_ms(_max_gc_threads),
-  _last_gc_worker_other_times_ms(_max_gc_threads),
-  _last_redirty_logged_cards_time_ms(_max_gc_threads),
-  _last_redirty_logged_cards_processed_cards(_max_gc_threads),
-  _cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads),
-  _cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads)
+  _last_gc_worker_start_times_ms(max_gc_threads, false),
+  _last_ext_root_scan_times_ms(max_gc_threads),
+  _last_satb_filtering_times_ms(max_gc_threads),
+  _last_update_rs_times_ms(max_gc_threads),
+  _last_update_rs_processed_buffers(max_gc_threads),
+  _last_scan_rs_times_ms(max_gc_threads),
+  _last_strong_code_root_scan_times_ms(max_gc_threads),
+  _last_obj_copy_times_ms(max_gc_threads),
+  _last_termination_times_ms(max_gc_threads),
+  _last_termination_attempts(max_gc_threads),
+  _last_gc_worker_end_times_ms(max_gc_threads, false),
+  _last_gc_worker_times_ms(max_gc_threads),
+  _last_gc_worker_other_times_ms(max_gc_threads),
+  _last_redirty_logged_cards_time_ms(max_gc_threads),
+  _last_redirty_logged_cards_processed_cards(max_gc_threads),
+  _cur_string_dedup_queue_fixup_worker_times_ms(max_gc_threads),
+  _cur_string_dedup_table_fixup_worker_times_ms(max_gc_threads)
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
+
+  _gc_phases[GC_WORKER_START] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false);
+  _gc_phases[EXT_ROOT_SCAN] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)");
+  _gc_phases[UPDATE_RS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)");
+
+  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers");
+  _gc_phases[UPDATE_RS]->link_sub_count_array(_update_rs_processed_buffers);
 }
 
 void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
   assert(active_gc_threads > 0, "The number of threads must be > 0");
-  assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max nubmer of threads");
+  assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
   _active_gc_threads = active_gc_threads;
 
+  for (int i = 0; i < Sentinel; i++) {
+    _gc_phases[i]->reset();
+  }
+
   _last_gc_worker_start_times_ms.reset();
   _last_ext_root_scan_times_ms.reset();
   _last_satb_filtering_times_ms.reset();
@@ -222,6 +289,10 @@
 }
 
 void G1GCPhaseTimes::note_gc_end() {
+  for (int i = 0; i < Sentinel; i++) {
+    _gc_phases[i]->verify();
+  }
+
   _last_gc_worker_start_times_ms.verify();
   _last_ext_root_scan_times_ms.verify();
   _last_satb_filtering_times_ms.verify();
@@ -309,6 +380,9 @@
     print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
   }
   print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
+  for (int i = 0; i < Sentinel; i++) {
+    _gc_phases[i]->print(2, "Dummy");
+  }
   _last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
   _last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");
   if (_last_satb_filtering_times_ms.sum() > 0.0) {

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -25,7 +25,6 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
 
-#include "memory/allocation.hpp"
 #include "gc_interface/gcCause.hpp"
 
 class LineBuffer;
@@ -34,9 +33,12 @@
 class WorkerDataArray : public CHeapObj<mtGC> {
   T* _data;
   uint _length;
+  const char* _title;
   bool _print_sum;
 
-  NOT_PRODUCT(static const T _uninitialized;)
+  WorkerDataArray<size_t>* _sub_count;
+
+  NOT_PRODUCT(T uninitialized();)
 
   // We are caching the sum and average to only have to calculate them once.
   // This is not done in an MT-safe way. It is intended to allow single
@@ -49,32 +51,41 @@
 
   void log_value(LineBuffer& buf, T value);
 
  public:
-  WorkerDataArray(uint length, bool print_sum = true) :
-    _length(length), _print_sum(print_sum), _has_new_data(true) {
-    assert(length > 0, "Must have some workers to store data for");
-    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
-  }
+  WorkerDataArray(uint length);
+  WorkerDataArray(uint length, bool print_sum);
 
-  ~WorkerDataArray() {
-    FREE_C_HEAP_ARRAY(T, _data);
+  WorkerDataArray(uint length, const char* title);
+  WorkerDataArray(uint length, const char* title, bool print_sum);
+
+  ~WorkerDataArray();
+
+  void link_sub_count_array(WorkerDataArray<size_t>* sub_count) {
+    _sub_count = sub_count;
   }
 
   void set(uint worker_i, T value) {
     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
-    assert(_data[worker_i] == (T)-1, err_msg("Overwriting data for worker %d", worker_i));
+    T ui = WorkerDataArray<T>::uninitialized();
+    double uii = -3.0;
+    assert(_data[worker_i] == ui, err_msg("Overwriting data for worker %d", worker_i));
     _data[worker_i] = value;
     _has_new_data = true;
   }
 
+  void set_sub_count(uint worker_i, size_t value) {
+    assert(_sub_count != NULL, "No sub count");
+    _sub_count->set(worker_i, value);
+  }
+
   T get(uint worker_i) {
     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
-    assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
+    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
     return _data[worker_i];
   }
 
   void add(uint worker_i, T value) {
     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
-    assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
+    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
     _data[worker_i] += value;
     _has_new_data = true;
   }
@@ -111,16 +122,24 @@
 };
 
 class G1GCPhaseTimes : public CHeapObj<mtGC> {
-
- private:
   uint _active_gc_threads;
   uint _max_gc_threads;
 
+  enum GCPhases {
+    GC_WORKER_START,
+    EXT_ROOT_SCAN,
+    UPDATE_RS,
+    Sentinel
+  };
+  WorkerDataArray<double>* _gc_phases[Sentinel];
+  WorkerDataArray<size_t>* _update_rs_processed_buffers;
+
+
   WorkerDataArray<double> _last_gc_worker_start_times_ms;
   WorkerDataArray<double> _last_ext_root_scan_times_ms;
   WorkerDataArray<double> _last_satb_filtering_times_ms;
   WorkerDataArray<double> _last_update_rs_times_ms;
-  WorkerDataArray<int>    _last_update_rs_processed_buffers;
+  WorkerDataArray<size_t> _last_update_rs_processed_buffers;
   WorkerDataArray<double> _last_scan_rs_times_ms;
   WorkerDataArray<double> _last_strong_code_root_scan_times_ms;
   WorkerDataArray<double> _last_obj_copy_times_ms;
@@ -180,10 +199,12 @@
   void print(double pause_time_sec);
 
   void record_gc_worker_start_time(uint worker_i, double ms) {
+    _gc_phases[GC_WORKER_START]->set(worker_i, ms);
     _last_gc_worker_start_times_ms.set(worker_i, ms);
   }
 
   void record_ext_root_scan_time(uint worker_i, double ms) {
+    _gc_phases[EXT_ROOT_SCAN]->set(worker_i, ms);
     _last_ext_root_scan_times_ms.set(worker_i, ms);
   }
 
@@ -193,10 +214,12 @@
 
   void record_update_rs_time(uint worker_i, double ms) {
     _last_update_rs_times_ms.set(worker_i, ms);
+    _gc_phases[UPDATE_RS]->set(worker_i, ms);
   }
 
-  void record_update_rs_processed_buffers(uint worker_i, int processed_buffers) {
+  void record_update_rs_processed_buffers(uint worker_i, size_t processed_buffers) {
     _last_update_rs_processed_buffers.set(worker_i, processed_buffers);
+    _gc_phases[UPDATE_RS]->set_sub_count(worker_i, processed_buffers);
   }
 
   void record_scan_rs_time(uint worker_i, double ms) {

# HG changeset patch
# User brutisso
# Date 1424435149 -3600
#      Fri Feb 20 13:25:49 2015 +0100
# Node ID 8a168138d6d555f4201e1b71736b3e8912138881
# Parent  4287849418de18f23c64add89a6251c0e6795256
[mq]: secondpart

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2211,6 +2211,7 @@
     n_completed_buffers++;
   }
   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
+  g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::UPDATE_RS, worker_i, n_completed_buffers);
   dcqs.clear_n_completed_buffers();
   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
 }
@@ -4488,6 +4489,7 @@
 
     double start_time_ms = os::elapsedTime() * 1000.0;
     _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
+    _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GC_WORKER_START, worker_id, os::elapsedTime());
 
     {
       ResourceMark rm;
@@ -4656,6 +4658,7 @@
     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
 
   g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
+  g1_policy()->phase_times()->record_time(G1GCPhaseTimes::EXT_ROOT_SCAN, worker_i, (os::elapsedTime() - ext_roots_start) - obj_copy_time_sec);
 
   // During conc marking we have to filter the per-thread SATB buffers
   // to make sure we remove any oops into the CSet (which will show up

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -30,6 +30,7 @@
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "memory/allocation.hpp"
 #include "runtime/atomic.inline.hpp"
+#include "runtime/os.hpp"
 
 // Helper class for avoiding interleaved logging
 class LineBuffer: public StackObj {
@@ -456,3 +457,12 @@
     print_stats(2, "Verify After", _cur_verify_after_time_ms);
   }
 }
+
+G1GCPhaseTimesTracker::G1GCPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCPhases phase, uint worker_id) :
+  _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
+  _start_time = os::elapsedTime();
+}
+
+G1GCPhaseTimesTracker::~G1GCPhaseTimesTracker() {
+  _phase_times->record_time(_phase, _worker_id, os::elapsedTime() - _start_time);
+}

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -125,12 +125,15 @@
   uint _active_gc_threads;
   uint _max_gc_threads;
 
+ public:
   enum GCPhases {
     GC_WORKER_START,
     EXT_ROOT_SCAN,
     UPDATE_RS,
     Sentinel
   };
+
+ private:
   WorkerDataArray<double>* _gc_phases[Sentinel];
   WorkerDataArray<size_t>* _update_rs_processed_buffers;
 
@@ -198,13 +201,19 @@
   void note_gc_end();
   void print(double pause_time_sec);
 
+  void record_time(GCPhases phase, uint worker_i, double secs) {
+    _gc_phases[phase]->set(worker_i, secs * 1000);
+  }
+
+  void record_sub_count(GCPhases phase, uint worker_i, size_t count) {
+    _gc_phases[phase]->set_sub_count(worker_i, count);
+  }
+
   void record_gc_worker_start_time(uint worker_i, double ms) {
-    _gc_phases[GC_WORKER_START]->set(worker_i, ms);
     _last_gc_worker_start_times_ms.set(worker_i, ms);
   }
 
   void record_ext_root_scan_time(uint worker_i, double ms) {
-    _gc_phases[EXT_ROOT_SCAN]->set(worker_i, ms);
     _last_ext_root_scan_times_ms.set(worker_i, ms);
   }
 
@@ -214,12 +223,10 @@
 
   void record_update_rs_time(uint worker_i, double ms) {
     _last_update_rs_times_ms.set(worker_i, ms);
-    _gc_phases[UPDATE_RS]->set(worker_i, ms);
   }
 
   void record_update_rs_processed_buffers(uint worker_i, size_t processed_buffers) {
     _last_update_rs_processed_buffers.set(worker_i, processed_buffers);
-    _gc_phases[UPDATE_RS]->set_sub_count(worker_i, processed_buffers);
   }
 
   void record_scan_rs_time(uint worker_i, double ms) {
@@ -424,4 +431,14 @@
   }
 };
 
+class G1GCPhaseTimesTracker : public StackObj {
+  double _start_time;
+  G1GCPhaseTimes::GCPhases _phase;
+  G1GCPhaseTimes* _phase_times;
+  uint _worker_id;
+public:
+  G1GCPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCPhases phase, uint worker_id);
+  ~G1GCPhaseTimesTracker();
+};
+
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP

diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -287,6 +287,7 @@
 };
 
 void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
+  G1GCPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UPDATE_RS, worker_i);
   double start = os::elapsedTime();
   // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);

# HG changeset patch
# User brutisso
# Date 1424436616 -3600
#      Fri Feb 20 13:50:16 2015 +0100
# Node ID 23725d9435ce0cf25ae000c7f8c3166685f73af7
# Parent  8a168138d6d555f4201e1b71736b3e8912138881
[mq]: satbfiltering

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4663,9 +4663,12 @@
   // During conc marking we have to filter the per-thread SATB buffers
   // to make sure we remove any oops into the CSet (which will show up
   // as implicitly live).
+  g1_policy()->phase_times()->disable(G1GCPhaseTimes::SATB_FILTERING);
   double satb_filtering_ms = 0.0;
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
     if (mark_in_progress()) {
+      g1_policy()->phase_times()->enable(G1GCPhaseTimes::SATB_FILTERING);
+      G1GCPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATB_FILTERING, worker_i);
       double satb_filter_start = os::elapsedTime();
 
       JavaThread::satb_mark_queue_set().filter_thread_buffers();

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -85,7 +85,7 @@
 
 template <class T>
 WorkerDataArray<T>::WorkerDataArray(uint length) :
-  _title(NULL), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL) {
+  _title(NULL), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL), _enabled(true) {
   assert(length > 0, "Must have some workers to store data for");
   _length = length;
   _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
@@ -93,7 +93,7 @@
 
 template <class T>
 WorkerDataArray<T>::WorkerDataArray(uint length, bool print_sum) :
-  _title(NULL), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL) {
+  _title(NULL), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL), _enabled(true) {
   assert(length > 0, "Must have some workers to store data for");
   _length = length;
   _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
@@ -101,7 +101,7 @@
 
 template <class T>
 WorkerDataArray<T>::WorkerDataArray(uint length, const char* title) :
-  _title(title), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL) {
+  _title(title), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL), _enabled(true) {
   assert(length > 0, "Must have some workers to store data for");
   _length = length;
   _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
@@ -109,7 +109,7 @@
 
 template <class T>
 WorkerDataArray<T>::WorkerDataArray(uint length, const char* title, bool print_sum) :
-  _title(title), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL) {
+  _title(title), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL), _enabled(true) {
   assert(length > 0, "Must have some workers to store data for");
   _length = length;
   _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
@@ -255,6 +255,7 @@
 
   _gc_phases[GC_WORKER_START] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false);
   _gc_phases[EXT_ROOT_SCAN] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)");
+  _gc_phases[SATB_FILTERING] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)");
   _gc_phases[UPDATE_RS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)");
 
   _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers");
@@ -291,7 +292,9 @@
 
 void G1GCPhaseTimes::note_gc_end() {
   for (int i = 0; i < Sentinel; i++) {
-    _gc_phases[i]->verify();
+    if (_gc_phases[i]->enabled()) {
+      _gc_phases[i]->verify();
+    }
   }
 
   _last_gc_worker_start_times_ms.verify();
@@ -382,7 +385,9 @@
   }
   print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
   for (int i = 0; i < Sentinel; i++) {
-    _gc_phases[i]->print(2, "Dummy");
+    if (_gc_phases[i]->enabled()) {
+      _gc_phases[i]->print(2, "Dummy");
+    }
   }
   _last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
   _last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -35,6 +35,7 @@
   uint _length;
   const char* _title;
   bool _print_sum;
+  bool _enabled;
 
   WorkerDataArray<size_t>* _sub_count;
 
@@ -109,6 +110,10 @@
   void reset() PRODUCT_RETURN;
   void verify() PRODUCT_RETURN;
 
+  void enable()  { _enabled = true;  }
+  void disable() { _enabled = false; }
+  bool enabled() { return _enabled;  }
+
  private:
 
   void calculate_totals(){
@@ -129,6 +134,7 @@
   enum GCPhases {
     GC_WORKER_START,
     EXT_ROOT_SCAN,
+    SATB_FILTERING,
     UPDATE_RS,
     Sentinel
   };
@@ -209,6 +215,15 @@
     _gc_phases[phase]->set_sub_count(worker_i, count);
   }
 
+  void enable(GCPhases phase) {
+    _gc_phases[phase]->enable();
+  }
+
+  void disable(GCPhases phase) {
+    _gc_phases[phase]->disable();
+  }
+
+
   void record_gc_worker_start_time(uint worker_i, double ms) {
     _last_gc_worker_start_times_ms.set(worker_i, ms);
   }

# HG changeset patch
# User brutisso
# Date 1424681528 -3600
#      Mon Feb 23 09:52:08 2015 +0100
# Node ID 8f0c989b0504f885956985617dffa016b0df8a29
# Parent  23725d9435ce0cf25ae000c7f8c3166685f73af7
[mq]: scan-root-objcopy

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4569,9 +4569,10 @@
       double start = os::elapsedTime();
       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
       evac.do_void();
-      double elapsed_ms = (os::elapsedTime()-start)*1000.0;
+      double elapsed_sec = os::elapsedTime()-start;
       double term_ms = pss.term_time()*1000.0;
-      _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms-term_ms);
+      _g1h->g1_policy()->phase_times()->add_time(G1GCPhaseTimes::OBJ_COPY, worker_id, elapsed_sec);
+      _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, (elapsed_sec * 1000.0) - term_ms);
       _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
@@ -4653,6 +4654,7 @@
     + buf_scan_non_heap_weak_roots.closure_app_seconds();
 
   g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
+  g1_policy()->phase_times()->record_time(G1GCPhaseTimes::OBJ_COPY, worker_i, obj_copy_time_sec);
 
   double ext_root_time_ms =
     ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -257,6 +257,9 @@
   _gc_phases[EXT_ROOT_SCAN] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)");
   _gc_phases[SATB_FILTERING] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)");
   _gc_phases[UPDATE_RS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)");
+  _gc_phases[SCAN_RS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)");
+  _gc_phases[CODE_ROOTS] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)");
+  _gc_phases[OBJ_COPY] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)");
 
   _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers");
   _gc_phases[UPDATE_RS]->link_sub_count_array(_update_rs_processed_buffers);

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -136,6 +136,9 @@
     EXT_ROOT_SCAN,
     SATB_FILTERING,
     UPDATE_RS,
+    SCAN_RS,
+    CODE_ROOTS,
+    OBJ_COPY,
     Sentinel
   };
 
@@ -211,6 +214,10 @@
     _gc_phases[phase]->set(worker_i, secs * 1000);
   }
 
+  void add_time(GCPhases phase, uint worker_i, double secs) {
+    _gc_phases[phase]->add(worker_i, secs * 1000);
+  }
+
   void record_sub_count(GCPhases phase, uint worker_i, size_t count) {
     _gc_phases[phase]->set_sub_count(worker_i, count);
   }

diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp
@@ -249,8 +249,10 @@
   _cards_scanned[worker_i] = scanRScl.cards_done();
 
   _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
+  _g1p->phase_times()->record_time(G1GCPhaseTimes::SCAN_RS, worker_i, scan_rs_time_sec);
   _g1p->phase_times()->record_strong_code_root_scan_time(worker_i, scanRScl.strong_code_root_scan_time_sec() * 1000.0);
+  _g1p->phase_times()->record_time(G1GCPhaseTimes::CODE_ROOTS, worker_i, scanRScl.strong_code_root_scan_time_sec());
 }
 
 // Closure used for updating RSets and recording references that

# HG changeset patch
# User brutisso
# Date 1424682978 -3600
#      Mon Feb 23 10:16:18 2015 +0100
# Node ID f385600ee2aa9998881dcd02ba1fd49856562473
# Parent  8f0c989b0504f885956985617dffa016b0df8a29
[mq]: termination

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4570,10 +4570,12 @@
       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
       evac.do_void();
       double elapsed_sec = os::elapsedTime()-start;
-      double term_ms = pss.term_time()*1000.0;
-      _g1h->g1_policy()->phase_times()->add_time(G1GCPhaseTimes::OBJ_COPY, worker_id, elapsed_sec);
-      _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, (elapsed_sec * 1000.0) - term_ms);
-      _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
+      double term_sec = pss.term_time();
+      _g1h->g1_policy()->phase_times()->add_time(G1GCPhaseTimes::OBJ_COPY, worker_id, elapsed_sec - term_sec);
+      _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, (elapsed_sec * 1000.0) - (term_sec*1000.0));
+      _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::TERMINATION, worker_id, term_sec);
+      _g1h->g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::TERMINATION, worker_id, pss.term_attempts());
+      _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_sec*1000.0, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -260,9 +260,12 @@
   _gc_phases[SCAN_RS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)");
   _gc_phases[CODE_ROOTS] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)");
   _gc_phases[OBJ_COPY] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)");
+  _gc_phases[TERMINATION] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)");
 
   _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers");
   _gc_phases[UPDATE_RS]->link_sub_count_array(_update_rs_processed_buffers);
+  _termination_attmpts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts");
+  _gc_phases[TERMINATION]->link_sub_count_array(_termination_attmpts);
 }
 
 void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
@@ -392,6 +395,9 @@
       _gc_phases[i]->print(2, "Dummy");
     }
   }
+
+  gclog_or_tty->print_cr("====================");
+
   _last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
   _last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");
   if (_last_satb_filtering_times_ms.sum() > 0.0) {

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -139,12 +139,14 @@
     SCAN_RS,
     CODE_ROOTS,
     OBJ_COPY,
+    TERMINATION,
     Sentinel
   };
 
  private:
   WorkerDataArray<double>* _gc_phases[Sentinel];
   WorkerDataArray<size_t>* _update_rs_processed_buffers;
+  WorkerDataArray<size_t>* _termination_attmpts;
 
   WorkerDataArray<double> _last_gc_worker_start_times_ms;

# HG changeset patch
# User brutisso
# Date 1424687362 -3600
#      Mon Feb 23 11:29:22 2015 +0100
# Node ID be9140530c63fb4ccb3f94b157cf6f8770195a8d
# Parent  f385600ee2aa9998881dcd02ba1fd49856562473
[mq]: end-times

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4592,8 +4592,9 @@
       // "GC Worker Time".
     }
 
-    double end_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
+    double end_time_sec = os::elapsedTime();
+    _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_sec * 1000.0);
+    _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GC_WORKER_END, worker_id, end_time_sec);
   }
 };

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -122,11 +122,21 @@
 
 template <>
 void WorkerDataArray<double>::log_value(LineBuffer& buf, double value) {
+  buf.append("%.1lf", value * 1000);
+}
+
+template <>
+void WorkerDataArray<size_t>::log_value(LineBuffer& buf, size_t value) {
+  buf.append(SIZE_FORMAT, value);
+}
+
+template <>
+void WorkerDataArray<double>::log_value_old(LineBuffer& buf, double value) {
   buf.append("%.1lf", value);
 }
 
 template <>
-void WorkerDataArray<size_t>::log_value(LineBuffer& buf, size_t value) {
+void WorkerDataArray<size_t>::log_value_old(LineBuffer& buf, size_t value) {
   buf.append(SIZE_FORMAT, value);
 }
 
@@ -144,11 +154,67 @@
 PRAGMA_FORMAT_NONLITERAL_IGNORED
 template <class T>
 void WorkerDataArray<T>::print(int level, const char* title) {
-  const char* t_title = _title == NULL ? title : _title;
   if (_length == 1) {
     // No need for min, max, average and sum for only one worker
     LineBuffer buf(level);
-    buf.append("[%s: ", t_title);
+    buf.append("[%s: ", title);
+    log_value_old(buf, _data[0]);
+    buf.append_and_print_cr("]");
+    return;
+  }
+
+  T min = _data[0];
+  T max = _data[0];
+  T sum = 0;
+
+  LineBuffer buf(level);
+  buf.append("[%s:", title);
+  for (uint i = 0; i < _length; ++i) {
+    T val = _data[i];
+    min = MIN2(val, min);
+    max = MAX2(val, max);
+    sum += val;
+    if (G1Log::finest()) {
+      buf.append(" ");
+      log_value_old(buf, val);
+    }
+  }
+
+  if (G1Log::finest()) {
+    buf.append_and_print_cr("%s", "");
+  }
+
+  double avg = (double)sum / (double)_length;
+  buf.append(" Min: ");
+  log_value_old(buf, min);
+  buf.append(", Avg: ");
+  buf.append("%.1lf", avg); // Always print average as a double
+  buf.append(", Max: ");
+  log_value_old(buf, max);
+  buf.append(", Diff: ");
+  log_value_old(buf, max - min);
+  if (_print_sum) {
+    // for things like the start and end times the sum is not
+    // that relevant
+    buf.append(", Sum: ");
+    log_value_old(buf, sum);
+  }
+  buf.append_and_print_cr("]");
+
+  if (_sub_count != NULL) {
+    _sub_count->print(level + 1, "Dummy2");
+  }
+}
+PRAGMA_DIAG_POP
+
+PRAGMA_DIAG_PUSH
+PRAGMA_FORMAT_NONLITERAL_IGNORED
+template <class T>
+void WorkerDataArray<T>::print(int level) {
+  if (_length == 1) {
+    // No need for min, max, average and sum for only one worker
+    LineBuffer buf(level);
+    buf.append("[%s: ", _title);
     log_value(buf, _data[0]);
     buf.append_and_print_cr("]");
     return;
@@ -159,7 +225,7 @@
   T sum = 0;
 
   LineBuffer buf(level);
-  buf.append("[%s:", t_title);
+  buf.append("[%s:", _title);
   for (uint i = 0; i < _length; ++i) {
     T val = _data[i];
     min = MIN2(val, min);
@@ -193,7 +259,7 @@
   buf.append_and_print_cr("]");
 
   if (_sub_count != NULL) {
-    _sub_count->print(level + 1, "Dummy2");
+    _sub_count->print(level + 1);
   }
 }
 PRAGMA_DIAG_POP
@@ -261,6 +327,9 @@
   _gc_phases[CODE_ROOTS] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)");
   _gc_phases[OBJ_COPY] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)");
   _gc_phases[TERMINATION] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)");
+  _gc_phases[GC_WORKER_TOTAL] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms)");
+  _gc_phases[GC_WORKER_END] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms)", false);
+  _gc_phases[OTHER] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms)");
 
   _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers");
   _gc_phases[UPDATE_RS]->link_sub_count_array(_update_rs_processed_buffers);
@@ -297,6 +366,25 @@
 }
 
 void G1GCPhaseTimes::note_gc_end() {
+  for (uint i = 0; i < _active_gc_threads; i++) {
+    double worker_time = _gc_phases[GC_WORKER_END]->get(i) - _gc_phases[GC_WORKER_START]->get(i);
+    record_time(GC_WORKER_TOTAL, i , worker_time);
+
+    double worker_known_time =
+      _gc_phases[EXT_ROOT_SCAN]->get(i) +
+      _gc_phases[UPDATE_RS]->get(i) +
+      _gc_phases[SCAN_RS]->get(i) +
+      _gc_phases[CODE_ROOTS]->get(i) +
+      _gc_phases[OBJ_COPY]->get(i) +
+      _gc_phases[TERMINATION]->get(i);
+
+    if (_gc_phases[SATB_FILTERING]->enabled()) {
+      worker_known_time += _gc_phases[SATB_FILTERING]->get(i);
+    }
+
+    record_time(OTHER, i, worker_time - worker_known_time);
+  }
+
   for (int i = 0; i < Sentinel; i++) {
     if (_gc_phases[i]->enabled()) {
       _gc_phases[i]->verify();
@@ -392,7 +480,7 @@
   print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
   for (int i = 0; i < Sentinel; i++) {
     if (_gc_phases[i]->enabled()) {
-      _gc_phases[i]->print(2, "Dummy");
+      _gc_phases[i]->print(2);
     }
   }

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -50,6 +50,7 @@
   double _average;
 
   void log_value(LineBuffer& buf, T value);
+  void log_value_old(LineBuffer& buf, T value);
 
  public:
   WorkerDataArray(uint length);
@@ -106,6 +107,7 @@
   }
 
   void print(int level, const char* title);
+  void print(int level);
 
   void reset() PRODUCT_RETURN;
   void verify() PRODUCT_RETURN;
@@ -140,6 +142,9 @@
     CODE_ROOTS,
     OBJ_COPY,
     TERMINATION,
+    OTHER,
+    GC_WORKER_TOTAL,
+    GC_WORKER_END,
     Sentinel
   };
 
@@ -212,12 +217,12 @@
   void note_gc_end();
   void print(double pause_time_sec);
 
-  void record_time(GCPhases phase, uint worker_i, double secs) {
-    _gc_phases[phase]->set(worker_i, secs * 1000);
+  void record_time(GCPhases phase, uint worker_i, double time) {
+    _gc_phases[phase]->set(worker_i, time);
   }
 
-  void add_time(GCPhases phase, uint worker_i, double secs) {
-    _gc_phases[phase]->add(worker_i, secs * 1000);
+  void add_time(GCPhases phase, uint worker_i, double time) {
+    _gc_phases[phase]->add(worker_i, time);
   }
 
   void record_sub_count(GCPhases phase, uint worker_i, size_t count) {

# HG changeset patch
# User brutisso
# Date 1424696618 -3600
#      Mon Feb 23 14:03:38 2015 +0100
# Node ID 872683b007d925e9a253680e044f6a6714ed61ca
# Parent  be9140530c63fb4ccb3f94b157cf6f8770195a8d
[mq]: strdedup

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -335,6 +335,11 @@
   _gc_phases[UPDATE_RS]->link_sub_count_array(_update_rs_processed_buffers);
   _termination_attmpts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts");
   _gc_phases[TERMINATION]->link_sub_count_array(_termination_attmpts);
+
+  _gc_phases[STR_DEDUP_QUEUE_FIXUP] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)");
+  _gc_phases[STR_DEDUP_TABLE_FIXUP] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)");
+
+  _gc_phases[REDIRTY_CARDS] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty");
 }
 
 void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
@@ -346,6 +351,11 @@
     _gc_phases[i]->reset();
   }
 
+  if (!G1StringDedup::is_enabled()) {
+    _gc_phases[STR_DEDUP_QUEUE_FIXUP]->disable();
+    _gc_phases[STR_DEDUP_TABLE_FIXUP]->disable();
+  }
+
   _last_gc_worker_start_times_ms.reset();
   _last_ext_root_scan_times_ms.reset();
   _last_satb_filtering_times_ms.reset();
@@ -478,7 +488,7 @@
     print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
   }
   print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
-  for (int i = 0; i < Sentinel; i++) {
+  for (int i = 0; i <= _gc_main_phases_last; i++) {
     if (_gc_phases[i]->enabled()) {
       _gc_phases[i]->print(2);
     }
@@ -508,6 +518,11 @@
   print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
   if (G1StringDedup::is_enabled()) {
     print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
+    for (int i = _string_dedup_phases_first; i <= _string_dedup_phases_last; i++) {
+      if (_gc_phases[i]->enabled()) {
+        _gc_phases[i]->print(2);
+      }
+    }
     _cur_string_dedup_queue_fixup_worker_times_ms.print(2, "Queue Fixup (ms)");
     _cur_string_dedup_table_fixup_worker_times_ms.print(2, "Table Fixup (ms)");
   }

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -145,6 +145,9 @@
     OTHER,
     GC_WORKER_TOTAL,
     GC_WORKER_END,
+    STR_DEDUP_QUEUE_FIXUP,
+    STR_DEDUP_TABLE_FIXUP,
+    REDIRTY_CARDS,
     Sentinel
   };
 
@@ -153,6 +156,9 @@
   WorkerDataArray<double>* _gc_phases[Sentinel];
   WorkerDataArray<size_t>* _update_rs_processed_buffers;
   WorkerDataArray<size_t>* _termination_attmpts;
+  static const int _gc_main_phases_last = GC_WORKER_END;
+  static const int _string_dedup_phases_first = STR_DEDUP_QUEUE_FIXUP;
+  static const int _string_dedup_phases_last = STR_DEDUP_TABLE_FIXUP;
 
   WorkerDataArray<double> _last_gc_worker_start_times_ms;
   WorkerDataArray<double> _last_ext_root_scan_times_ms;
@@ -177,6 +183,7 @@
   double _cur_evac_fail_remove_self_forwards;
 
   double _cur_string_dedup_fixup_time_ms;
+
   WorkerDataArray<double> _cur_string_dedup_queue_fixup_worker_times_ms;
   WorkerDataArray<double> _cur_string_dedup_table_fixup_worker_times_ms;

diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
--- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
+++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
@@ -139,11 +139,13 @@
     double table_fixup_start = os::elapsedTime();
     G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
 
-    double queue_fixup_time_ms = (table_fixup_start - queue_fixup_start) * 1000.0;
-    double table_fixup_time_ms = (os::elapsedTime() - table_fixup_start) * 1000.0;
+    double queue_fixup_time_sec = (table_fixup_start - queue_fixup_start);
+    double table_fixup_time_sec = (os::elapsedTime() - table_fixup_start);
     G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
-    g1p->phase_times()->record_string_dedup_queue_fixup_worker_time(worker_id, queue_fixup_time_ms);
-    g1p->phase_times()->record_string_dedup_table_fixup_worker_time(worker_id, table_fixup_time_ms);
+    g1p->phase_times()->record_time(G1GCPhaseTimes::STR_DEDUP_QUEUE_FIXUP, worker_id, queue_fixup_time_sec);
+    g1p->phase_times()->record_time(G1GCPhaseTimes::STR_DEDUP_TABLE_FIXUP, worker_id, table_fixup_time_sec);
+    g1p->phase_times()->record_string_dedup_queue_fixup_worker_time(worker_id, queue_fixup_time_sec * 1000.0);
+    g1p->phase_times()->record_string_dedup_table_fixup_worker_time(worker_id, table_fixup_time_sec * 1000.0);
   }
 };

# HG changeset patch
# User brutisso
# Date 1424697225 -3600
#      Mon Feb 23 14:13:45 2015 +0100
# Node ID 24d00c5487b47e360eea16c8f94072e496466519
# Parent  872683b007d925e9a253680e044f6a6714ed61ca
[mq]: redirtied-cards

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -5090,6 +5090,8 @@
     _queue->par_apply_closure_to_all_completed_buffers(&cl);
 
     G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
+    timer->record_time(G1GCPhaseTimes::REDIRTY_CARDS, worker_id, os::elapsedTime() - start_time);
+    timer->record_sub_count(G1GCPhaseTimes::REDIRTY_CARDS, worker_id, cl.num_processed());
     timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
     timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
   }

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -340,6 +340,8 @@
   _gc_phases[STR_DEDUP_TABLE_FIXUP] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)");
 
   _gc_phases[REDIRTY_CARDS] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty");
+  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards");
+  _gc_phases[REDIRTY_CARDS]->link_sub_count_array(_redirtied_cards);
 }
 
 void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
@@ -549,6 +551,7 @@
     print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
   print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
   if (G1Log::finest()) {
+    _gc_phases[REDIRTY_CARDS]->print(3);
    _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
     _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
   }

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -155,6 +155,7 @@
   WorkerDataArray<double>* _gc_phases[Sentinel];
   WorkerDataArray<size_t>* _update_rs_processed_buffers;
   WorkerDataArray<size_t>* _termination_attmpts;
+  WorkerDataArray<size_t>* _redirtied_cards;
 
   static const int _gc_main_phases_last = GC_WORKER_END;
   static const int _string_dedup_phases_first = STR_DEDUP_QUEUE_FIXUP;

# HG changeset patch
# User brutisso
# Date 1424698651 -3600
#      Mon Feb 23 14:37:31 2015 +0100
# Node ID 6c35a69a5637780f56da66da785691788899f069
# Parent  24d00c5487b47e360eea16c8f94072e496466519
[mq]: loglevel

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -258,7 +258,7 @@
   }
   buf.append_and_print_cr("]");
 
-  if (_sub_count != NULL) {
+  if (_sub_count != NULL && _sub_count->enabled()) {
     _sub_count->print(level + 1);
   }
 }
@@ -333,8 +333,9 @@
 
   _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers");
   _gc_phases[UPDATE_RS]->link_sub_count_array(_update_rs_processed_buffers);
-  _termination_attmpts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts");
-  _gc_phases[TERMINATION]->link_sub_count_array(_termination_attmpts);
+
+  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts");
+  _gc_phases[TERMINATION]->link_sub_count_array(_termination_attempts);
 
   _gc_phases[STR_DEDUP_QUEUE_FIXUP] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)");
   _gc_phases[STR_DEDUP_TABLE_FIXUP] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)");
@@ -353,6 +354,10 @@
     _gc_phases[i]->reset();
   }
 
+  if (!G1Log::finest()) {
+    _termination_attempts->disable();
+  }
+
   if (!G1StringDedup::is_enabled()) {
     _gc_phases[STR_DEDUP_QUEUE_FIXUP]->disable();
     _gc_phases[STR_DEDUP_TABLE_FIXUP]->disable();

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -154,7 +154,7 @@
  private:
   WorkerDataArray<double>* _gc_phases[Sentinel];
   WorkerDataArray<size_t>* _update_rs_processed_buffers;
-  WorkerDataArray<size_t>* _termination_attmpts;
+  WorkerDataArray<size_t>* _termination_attempts;
   WorkerDataArray<size_t>* _redirtied_cards;
 
   static const int _gc_main_phases_last = GC_WORKER_END;

# HG changeset patch
# User brutisso
# Date 1424772942 -3600
#      Tue Feb 24 11:15:42 2015 +0100
# Node ID 6381f79fdd9bc65c221ce8bc9329bbbcb9ebeffc
# Parent  6c35a69a5637780f56da66da785691788899f069
[mq]: remove-old

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2210,7 +2210,6 @@
   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
     n_completed_buffers++;
   }
-  g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
   g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::UPDATE_RS, worker_i, n_completed_buffers);
   dcqs.clear_n_completed_buffers();
   assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
@@ -3754,7 +3753,7 @@
   int active_workers = workers()->active_workers();
   double pause_start_sec = os::elapsedTime();
-  g1_policy()->phase_times()->note_gc_start(active_workers);
+  g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
   log_gc_header();
 
   TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
@@ -4487,8 +4486,6 @@
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round
 
-    double start_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
     _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GC_WORKER_START, worker_id, os::elapsedTime());
 
     {
       ResourceMark rm;
@@ -4569,13 +4566,11 @@
       double start = os::elapsedTime();
       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
       evac.do_void();
-      double elapsed_sec = os::elapsedTime()-start;
+      double elapsed_sec = os::elapsedTime() - start;
       double term_sec = pss.term_time();
       _g1h->g1_policy()->phase_times()->add_time(G1GCPhaseTimes::OBJ_COPY, worker_id, elapsed_sec - term_sec);
-      _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, (elapsed_sec * 1000.0) - (term_sec*1000.0));
       _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::TERMINATION, worker_id, term_sec);
       _g1h->g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::TERMINATION, worker_id, pss.term_attempts());
-      _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_sec*1000.0, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4591,10 +4586,7 @@
       // destructors are executed here and are included as part of the
      // "GC Worker Time".
     }
-
-    double end_time_sec = os::elapsedTime();
-    _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_sec * 1000.0);
-    _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GC_WORKER_END, worker_id, end_time_sec);
+    _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GC_WORKER_END, worker_id, os::elapsedTime());
   }
 };
@@ -4656,32 +4648,22 @@
   double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
     + buf_scan_non_heap_weak_roots.closure_app_seconds();
 
-  g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
   g1_policy()->phase_times()->record_time(G1GCPhaseTimes::OBJ_COPY, worker_i, obj_copy_time_sec);
 
-  double ext_root_time_ms =
-    ((os::elapsedTime() - ext_roots_start) - obj_copy_time_sec) * 1000.0;
-
-  g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
-  g1_policy()->phase_times()->record_time(G1GCPhaseTimes::EXT_ROOT_SCAN, worker_i, (os::elapsedTime() - ext_roots_start) - obj_copy_time_sec);
+  double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
+  g1_policy()->phase_times()->record_time(G1GCPhaseTimes::EXT_ROOT_SCAN, worker_i, ext_root_time_sec);
 
   // During conc marking we have to filter the per-thread SATB buffers
   // to make sure we remove any oops into the CSet (which will show up
   // as implicitly live).
-  g1_policy()->phase_times()->disable(G1GCPhaseTimes::SATB_FILTERING);
-  double satb_filtering_ms = 0.0;
-  if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
-    if (mark_in_progress()) {
-      g1_policy()->phase_times()->enable(G1GCPhaseTimes::SATB_FILTERING);
+  if (mark_in_progress()) {
+    if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
       G1GCPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATB_FILTERING, worker_i);
-      double satb_filter_start = os::elapsedTime();
-
       JavaThread::satb_mark_queue_set().filter_thread_buffers();
-
-      satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
+    } else {
+      g1_policy()->phase_times()->record_time(G1GCPhaseTimes::SATB_FILTERING, worker_i, 0.0);
     }
   }
-  g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
 
   // Now scan the complement of the collection set.
   G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
@@ -5084,16 +5066,13 @@
   G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
 
   virtual void work(uint worker_id) {
-    double start_time = os::elapsedTime();
+    G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
+    G1GCPhaseTimesTracker x(timer, G1GCPhaseTimes::REDIRTY_CARDS, worker_id);
 
     RedirtyLoggedCardTableEntryClosure cl;
     _queue->par_apply_closure_to_all_completed_buffers(&cl);
 
-    G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
-    timer->record_time(G1GCPhaseTimes::REDIRTY_CARDS, worker_id, os::elapsedTime() - start_time);
     timer->record_sub_count(G1GCPhaseTimes::REDIRTY_CARDS, worker_id, cl.num_processed());
-    timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
-    timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
   }
 };

diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -1076,7 +1076,7 @@
   if (update_stats) {
     double cost_per_card_ms = 0.0;
     if (_pending_cards > 0) {
-      cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
+      cost_per_card_ms = phase_times()->average_time(G1GCPhaseTimes::UPDATE_RS) / (double) _pending_cards;
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
 
@@ -1084,7 +1084,7 @@
 
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
-      cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
+      cost_per_entry_ms = phase_times()->average_time(G1GCPhaseTimes::SCAN_RS) / (double) cards_scanned;
       if (_last_gc_was_young) {
         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
       } else {
@@ -1126,7 +1126,7 @@
 
     double cost_per_byte_ms = 0.0;
     if (copied_bytes > 0) {
-      cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
+      cost_per_byte_ms = phase_times()->average_time(G1GCPhaseTimes::OBJ_COPY) / (double) copied_bytes;
       if (_in_marking_window) {
         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
       } else {
@@ -1135,8 +1135,8 @@
     }
 
     double all_other_time_ms = pause_time_ms -
-      (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
-      + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
+      (phase_times()->average_time(G1GCPhaseTimes::UPDATE_RS) + phase_times()->average_time(G1GCPhaseTimes::SCAN_RS)
+      + phase_times()->average_time(G1GCPhaseTimes::OBJ_COPY) + phase_times()->average_time(G1GCPhaseTimes::TERMINATION));
 
     double young_other_time_ms = 0.0;
     if (young_cset_region_length() > 0) {
@@ -1177,8 +1177,8 @@
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; - adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(), - phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms); + adjust_concurrent_refinement(phase_times()->average_time(G1GCPhaseTimes::UPDATE_RS), + phase_times()->sub_count_sum(G1GCPhaseTimes::UPDATE_RS), update_rs_time_goal_ms); _collectionSetChooser->verify(); } @@ -2120,19 +2120,24 @@ _other.add(pause_time_ms - phase_times->accounted_time_ms()); _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms()); _parallel.add(phase_times->cur_collection_par_time_ms()); - _ext_root_scan.add(phase_times->average_last_ext_root_scan_time()); - _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms()); - _update_rs.add(phase_times->average_last_update_rs_time()); - _scan_rs.add(phase_times->average_last_scan_rs_time()); - _obj_copy.add(phase_times->average_last_obj_copy_time()); - _termination.add(phase_times->average_last_termination_time()); + _ext_root_scan.add(phase_times->average_time(G1GCPhaseTimes::EXT_ROOT_SCAN)); + if (phase_times->enabled(G1GCPhaseTimes::SATB_FILTERING)) { + _satb_filtering.add(phase_times->average_time(G1GCPhaseTimes::SATB_FILTERING)); + } + _update_rs.add(phase_times->average_time(G1GCPhaseTimes::UPDATE_RS)); + _scan_rs.add(phase_times->average_time(G1GCPhaseTimes::SCAN_RS)); + _obj_copy.add(phase_times->average_time(G1GCPhaseTimes::OBJ_COPY)); + _termination.add(phase_times->average_time(G1GCPhaseTimes::TERMINATION)); - double parallel_known_time = phase_times->average_last_ext_root_scan_time() + - phase_times->average_last_satb_filtering_times_ms() + - phase_times->average_last_update_rs_time() + - phase_times->average_last_scan_rs_time() + - phase_times->average_last_obj_copy_time() + - + phase_times->average_last_termination_time(); + double parallel_known_time = phase_times->average_time(G1GCPhaseTimes::EXT_ROOT_SCAN) + + phase_times->average_time(G1GCPhaseTimes::UPDATE_RS) + + phase_times->average_time(G1GCPhaseTimes::SCAN_RS) + + phase_times->average_time(G1GCPhaseTimes::OBJ_COPY) + + phase_times->average_time(G1GCPhaseTimes::TERMINATION); + + if (phase_times->enabled(G1GCPhaseTimes::SATB_FILTERING)) { + parallel_known_time += phase_times->average_time(G1GCPhaseTimes::SATB_FILTERING); + } double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time; _parallel_other.add(parallel_other_time); diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -84,22 +84,6 @@ }; template -WorkerDataArray::WorkerDataArray(uint length) : - _title(NULL), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL), _enabled(true) { - assert(length > 0, "Must have some workers to store data for"); - _length = length; - _data = NEW_C_HEAP_ARRAY(T, _length, mtGC); -} - -template -WorkerDataArray::WorkerDataArray(uint length, bool print_sum) : - _title(NULL), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL), _enabled(true) { - assert(length > 0, "Must have some workers to store data for"); - _length = length; - _data = NEW_C_HEAP_ARRAY(T, _length, mtGC); -} - -template WorkerDataArray::WorkerDataArray(uint length, const char* title) : _title(title), _length(0), _print_sum(true), _has_new_data(true), 
_sub_count(NULL), _enabled(true) { assert(length > 0, "Must have some workers to store data for"); @@ -131,16 +115,6 @@ } template <> -void WorkerDataArray::log_value_old(LineBuffer& buf, double value) { - buf.append("%.1lf", value); -} - -template <> -void WorkerDataArray::log_value_old(LineBuffer& buf, size_t value) { - buf.append(SIZE_FORMAT, value); -} - -template <> size_t WorkerDataArray::uninitialized() { return (size_t)-5; } @@ -153,63 +127,6 @@ PRAGMA_DIAG_PUSH PRAGMA_FORMAT_NONLITERAL_IGNORED template -void WorkerDataArray::print(int level, const char* title) { - if (_length == 1) { - // No need for min, max, average and sum for only one worker - LineBuffer buf(level); - buf.append("[%s: ", title); - log_value_old(buf, _data[0]); - buf.append_and_print_cr("]"); - return; - } - - T min = _data[0]; - T max = _data[0]; - T sum = 0; - - LineBuffer buf(level); - buf.append("[%s:", title); - for (uint i = 0; i < _length; ++i) { - T val = _data[i]; - min = MIN2(val, min); - max = MAX2(val, max); - sum += val; - if (G1Log::finest()) { - buf.append(" "); - log_value_old(buf, val); - } - } - - if (G1Log::finest()) { - buf.append_and_print_cr("%s", ""); - } - - double avg = (double)sum / (double)_length; - buf.append(" Min: "); - log_value_old(buf, min); - buf.append(", Avg: "); - buf.append("%.1lf", avg); // Always print average as a double - buf.append(", Max: "); - log_value_old(buf, max); - buf.append(", Diff: "); - log_value_old(buf, max - min); - if (_print_sum) { - // for things like the start and end times the sum is not - // that relevant - buf.append(", Sum: "); - log_value_old(buf, sum); - } - buf.append_and_print_cr("]"); - - if (_sub_count != NULL) { - _sub_count->print(level + 1, "Dummy2"); - } -} -PRAGMA_DIAG_POP - -PRAGMA_DIAG_PUSH -PRAGMA_FORMAT_NONLITERAL_IGNORED -template void WorkerDataArray::print(int level) { if (_length == 1) { // No need for min, max, average and sum for only one worker @@ -298,24 +215,7 @@ template class WorkerDataArray; G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) : - _max_gc_threads(max_gc_threads), - _last_gc_worker_start_times_ms(max_gc_threads, false), - _last_ext_root_scan_times_ms(max_gc_threads), - _last_satb_filtering_times_ms(max_gc_threads), - _last_update_rs_times_ms(max_gc_threads), - _last_update_rs_processed_buffers(max_gc_threads), - _last_scan_rs_times_ms(max_gc_threads), - _last_strong_code_root_scan_times_ms(max_gc_threads), - _last_obj_copy_times_ms(max_gc_threads), - _last_termination_times_ms(max_gc_threads), - _last_termination_attempts(max_gc_threads), - _last_gc_worker_end_times_ms(max_gc_threads, false), - _last_gc_worker_times_ms(max_gc_threads), - _last_gc_worker_other_times_ms(max_gc_threads), - _last_redirty_logged_cards_time_ms(max_gc_threads), - _last_redirty_logged_cards_processed_cards(max_gc_threads), - _cur_string_dedup_queue_fixup_worker_times_ms(max_gc_threads), - _cur_string_dedup_table_fixup_worker_times_ms(max_gc_threads) + _max_gc_threads(max_gc_threads) { assert(max_gc_threads > 0, "Must have some GC threads"); @@ -345,7 +245,7 @@ _gc_phases[REDIRTY_CARDS]->link_sub_count_array(_redirtied_cards); } -void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) { +void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) { assert(active_gc_threads > 0, "The number of threads must be > 0"); assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads"); _active_gc_threads = active_gc_threads; @@ -358,28 +258,16 @@ 
_termination_attempts->disable(); } + if (mark_in_progress) { + _gc_phases[SATB_FILTERING]->enable(); + } else { + _gc_phases[SATB_FILTERING]->disable(); + } + if (!G1StringDedup::is_enabled()) { _gc_phases[STR_DEDUP_QUEUE_FIXUP]->disable(); _gc_phases[STR_DEDUP_TABLE_FIXUP]->disable(); } - - _last_gc_worker_start_times_ms.reset(); - _last_ext_root_scan_times_ms.reset(); - _last_satb_filtering_times_ms.reset(); - _last_update_rs_times_ms.reset(); - _last_update_rs_processed_buffers.reset(); - _last_scan_rs_times_ms.reset(); - _last_strong_code_root_scan_times_ms.reset(); - _last_obj_copy_times_ms.reset(); - _last_termination_times_ms.reset(); - _last_termination_attempts.reset(); - _last_gc_worker_end_times_ms.reset(); - _last_gc_worker_times_ms.reset(); - _last_gc_worker_other_times_ms.reset(); - - _last_redirty_logged_cards_time_ms.reset(); - _last_redirty_logged_cards_processed_cards.reset(); - } void G1GCPhaseTimes::note_gc_end() { @@ -407,50 +295,6 @@ _gc_phases[i]->verify(); } } - - _last_gc_worker_start_times_ms.verify(); - _last_ext_root_scan_times_ms.verify(); - _last_satb_filtering_times_ms.verify(); - _last_update_rs_times_ms.verify(); - _last_update_rs_processed_buffers.verify(); - _last_scan_rs_times_ms.verify(); - _last_strong_code_root_scan_times_ms.verify(); - _last_obj_copy_times_ms.verify(); - _last_termination_times_ms.verify(); - _last_termination_attempts.verify(); - _last_gc_worker_end_times_ms.verify(); - - for (uint i = 0; i < _active_gc_threads; i++) { - double worker_time = _last_gc_worker_end_times_ms.get(i) - _last_gc_worker_start_times_ms.get(i); - _last_gc_worker_times_ms.set(i, worker_time); - - double worker_known_time = _last_ext_root_scan_times_ms.get(i) + - _last_satb_filtering_times_ms.get(i) + - _last_update_rs_times_ms.get(i) + - _last_scan_rs_times_ms.get(i) + - _last_strong_code_root_scan_times_ms.get(i) + - _last_obj_copy_times_ms.get(i) + - _last_termination_times_ms.get(i); - - double worker_other_time = worker_time - worker_known_time; - _last_gc_worker_other_times_ms.set(i, worker_other_time); - } - - _last_gc_worker_times_ms.verify(); - _last_gc_worker_other_times_ms.verify(); - - _last_redirty_logged_cards_time_ms.verify(); - _last_redirty_logged_cards_processed_cards.verify(); -} - -void G1GCPhaseTimes::note_string_dedup_fixup_start() { - _cur_string_dedup_queue_fixup_worker_times_ms.reset(); - _cur_string_dedup_table_fixup_worker_times_ms.reset(); -} - -void G1GCPhaseTimes::note_string_dedup_fixup_end() { - _cur_string_dedup_queue_fixup_worker_times_ms.verify(); - _cur_string_dedup_table_fixup_worker_times_ms.verify(); } void G1GCPhaseTimes::print_stats(int level, const char* str, double value) { @@ -494,6 +338,7 @@ if (_root_region_scan_wait_time_ms > 0.0) { print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms); } + print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads); for (int i = 0; i <= _gc_main_phases_last; i++) { if (_gc_phases[i]->enabled()) { @@ -501,26 +346,6 @@ } } - gclog_or_tty->print_cr("===================="); - - _last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)"); - _last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)"); - if (_last_satb_filtering_times_ms.sum() > 0.0) { - _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)"); - } - _last_update_rs_times_ms.print(2, "Update RS (ms)"); - _last_update_rs_processed_buffers.print(3, "Processed Buffers"); - _last_scan_rs_times_ms.print(2, "Scan RS (ms)"); - _last_strong_code_root_scan_times_ms.print(2, 
"Code Root Scanning (ms)"); - _last_obj_copy_times_ms.print(2, "Object Copy (ms)"); - _last_termination_times_ms.print(2, "Termination (ms)"); - if (G1Log::finest()) { - _last_termination_attempts.print(3, "Termination Attempts"); - } - _last_gc_worker_other_times_ms.print(2, "GC Worker Other (ms)"); - _last_gc_worker_times_ms.print(2, "GC Worker Total (ms)"); - _last_gc_worker_end_times_ms.print(2, "GC Worker End (ms)"); - print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms); print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms); if (G1StringDedup::is_enabled()) { @@ -530,8 +355,6 @@ _gc_phases[i]->print(2); } } - _cur_string_dedup_queue_fixup_worker_times_ms.print(2, "Queue Fixup (ms)"); - _cur_string_dedup_table_fixup_worker_times_ms.print(2, "Table Fixup (ms)"); } print_stats(1, "Clear CT", _cur_clear_ct_time_ms); double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms(); @@ -557,8 +380,6 @@ print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms); if (G1Log::finest()) { _gc_phases[REDIRTY_CARDS]->print(3); - _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty"); - _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards"); } if (G1EagerReclaimHumongousObjects) { print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms); diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp @@ -50,12 +50,8 @@ double _average; void log_value(LineBuffer& buf, T value); - void log_value_old(LineBuffer& buf, T value); public: - WorkerDataArray(uint length); - WorkerDataArray(uint length, bool print_sum); - WorkerDataArray(uint length, const char* title); WorkerDataArray(uint length, const char* title, bool print_sum); @@ -79,6 +75,11 @@ _sub_count->set(worker_i, value); } + size_t sub_count_sum() { + assert(_sub_count != NULL, "No sub count"); + return _sub_count->sum(); + } + T get(uint worker_i) { assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length)); assert(_data[worker_i] != WorkerDataArray::uninitialized(), err_msg("No data to add to for worker %d", worker_i)); @@ -106,7 +107,6 @@ return _sum; } - void print(int level, const char* title); void print(int level); void reset() PRODUCT_RETURN; @@ -119,6 +119,7 @@ private: void calculate_totals(){ + assert(_enabled, "No data to base calculations on"); _sum = (T)0; for (uint i = 0; i < _length; ++i) { _sum += _data[i]; @@ -161,20 +162,6 @@ static const int _string_dedup_phases_first = STR_DEDUP_QUEUE_FIXUP; static const int _string_dedup_phases_last = STR_DEDUP_TABLE_FIXUP; - WorkerDataArray _last_gc_worker_start_times_ms; - WorkerDataArray _last_ext_root_scan_times_ms; - WorkerDataArray _last_satb_filtering_times_ms; - WorkerDataArray _last_update_rs_times_ms; - WorkerDataArray _last_update_rs_processed_buffers; - WorkerDataArray _last_scan_rs_times_ms; - WorkerDataArray _last_strong_code_root_scan_times_ms; - WorkerDataArray _last_obj_copy_times_ms; - WorkerDataArray _last_termination_times_ms; - WorkerDataArray _last_termination_attempts; - WorkerDataArray _last_gc_worker_end_times_ms; - WorkerDataArray _last_gc_worker_times_ms; - WorkerDataArray _last_gc_worker_other_times_ms; - double _cur_collection_par_time_ms; double _cur_collection_code_root_fixup_time_ms; double _cur_strong_code_root_purge_time_ms; @@ -183,10 +170,7 
@@ double _cur_evac_fail_restore_remsets; double _cur_evac_fail_remove_self_forwards; - double _cur_string_dedup_fixup_time_ms; - - WorkerDataArray _cur_string_dedup_queue_fixup_worker_times_ms; - WorkerDataArray _cur_string_dedup_table_fixup_worker_times_ms; + double _cur_string_dedup_fixup_time_ms; double _cur_clear_ct_time_ms; double _cur_ref_proc_time_ms; @@ -198,8 +182,6 @@ double _recorded_young_cset_choice_time_ms; double _recorded_non_young_cset_choice_time_ms; - WorkerDataArray _last_redirty_logged_cards_time_ms; - WorkerDataArray _last_redirty_logged_cards_processed_cards; double _recorded_redirty_logged_cards_time_ms; double _recorded_young_free_cset_time_ms; @@ -221,7 +203,7 @@ public: G1GCPhaseTimes(uint max_gc_threads); - void note_gc_start(uint active_gc_threads); + void note_gc_start(uint active_gc_threads, bool mark_in_progress); void note_gc_end(); void print(double pause_time_sec); @@ -245,50 +227,16 @@ _gc_phases[phase]->disable(); } - - void record_gc_worker_start_time(uint worker_i, double ms) { - _last_gc_worker_start_times_ms.set(worker_i, ms); + bool enabled(GCPhases phase) { + return _gc_phases[phase]->enabled(); } - void record_ext_root_scan_time(uint worker_i, double ms) { - _last_ext_root_scan_times_ms.set(worker_i, ms); + double average_time(GCPhases phase) { + return _gc_phases[phase]->average(); } - void record_satb_filtering_time(uint worker_i, double ms) { - _last_satb_filtering_times_ms.set(worker_i, ms); - } - - void record_update_rs_time(uint worker_i, double ms) { - _last_update_rs_times_ms.set(worker_i, ms); - } - - void record_update_rs_processed_buffers(uint worker_i, size_t processed_buffers) { - _last_update_rs_processed_buffers.set(worker_i, processed_buffers); - } - - void record_scan_rs_time(uint worker_i, double ms) { - _last_scan_rs_times_ms.set(worker_i, ms); - } - - void record_strong_code_root_scan_time(uint worker_i, double ms) { - _last_strong_code_root_scan_times_ms.set(worker_i, ms); - } - - void record_obj_copy_time(uint worker_i, double ms) { - _last_obj_copy_times_ms.set(worker_i, ms); - } - - void add_obj_copy_time(uint worker_i, double ms) { - _last_obj_copy_times_ms.add(worker_i, ms); - } - - void record_termination(uint worker_i, double ms, size_t attempts) { - _last_termination_times_ms.set(worker_i, ms); - _last_termination_attempts.set(worker_i, attempts); - } - - void record_gc_worker_end_time(uint worker_i, double ms) { - _last_gc_worker_end_times_ms.set(worker_i, ms); + size_t sub_count_sum(GCPhases phase) { + return _gc_phases[phase]->sub_count_sum(); } void record_clear_ct_time(double ms) { @@ -319,21 +267,10 @@ _cur_evac_fail_remove_self_forwards = ms; } - void note_string_dedup_fixup_start(); - void note_string_dedup_fixup_end(); - void record_string_dedup_fixup_time(double ms) { _cur_string_dedup_fixup_time_ms = ms; } - void record_string_dedup_queue_fixup_worker_time(uint worker_id, double ms) { - _cur_string_dedup_queue_fixup_worker_times_ms.set(worker_id, ms); - } - - void record_string_dedup_table_fixup_worker_time(uint worker_id, double ms) { - _cur_string_dedup_table_fixup_worker_times_ms.set(worker_id, ms); - } - void record_ref_proc_time(double ms) { _cur_ref_proc_time_ms = ms; } @@ -373,14 +310,6 @@ _recorded_non_young_cset_choice_time_ms = time_ms; } - void record_redirty_logged_cards_time_ms(uint worker_i, double time_ms) { - _last_redirty_logged_cards_time_ms.set(worker_i, time_ms); - } - - void record_redirty_logged_cards_processed_cards(uint worker_i, size_t processed_buffers) { - 
_last_redirty_logged_cards_processed_cards.set(worker_i, processed_buffers); - } - void record_redirty_logged_cards_time_ms(double time_ms) { _recorded_redirty_logged_cards_time_ms = time_ms; } @@ -434,38 +363,6 @@ double fast_reclaim_humongous_time_ms() { return _cur_fast_reclaim_humongous_time_ms; } - - double average_last_update_rs_time() { - return _last_update_rs_times_ms.average(); - } - - int sum_last_update_rs_processed_buffers() { - return _last_update_rs_processed_buffers.sum(); - } - - double average_last_scan_rs_time(){ - return _last_scan_rs_times_ms.average(); - } - - double average_last_strong_code_root_scan_time(){ - return _last_strong_code_root_scan_times_ms.average(); - } - - double average_last_obj_copy_time() { - return _last_obj_copy_times_ms.average(); - } - - double average_last_termination_time() { - return _last_termination_times_ms.average(); - } - - double average_last_ext_root_scan_time() { - return _last_ext_root_scan_times_ms.average(); - } - - double average_last_satb_filtering_times_ms() { - return _last_satb_filtering_times_ms.average(); - } }; class G1GCPhaseTimesTracker : public StackObj { diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp @@ -248,10 +248,7 @@ assert(_cards_scanned != NULL, "invariant"); _cards_scanned[worker_i] = scanRScl.cards_done(); - _g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0); _g1p->phase_times()->record_time(G1GCPhaseTimes::SCAN_RS, worker_i, scan_rs_time_sec); - _g1p->phase_times()->record_strong_code_root_scan_time(worker_i, - scanRScl.strong_code_root_scan_time_sec() * 1000.0); _g1p->phase_times()->record_time(G1GCPhaseTimes::CODE_ROOTS, worker_i, scanRScl.strong_code_root_scan_time_sec()); } @@ -290,13 +287,10 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) { G1GCPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UPDATE_RS, worker_i); - double start = os::elapsedTime(); // Apply the given closure to all remaining log entries. 
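The header changes above delete the per-phase record_*/average_last_* methods; counters such as processed buffers and redirtied cards now travel through record_sub_count, a per-worker count array linked to its timing phase. A rough sketch of that linkage under assumed names (WorkerCounts, TimedPhase are not the HotSpot types):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // A per-worker count array, e.g. Update RS -> "Processed Buffers".
    struct WorkerCounts {
      size_t* data;
      unsigned length;
      explicit WorkerCounts(unsigned len) : data(new size_t[len]()), length(len) {}
      ~WorkerCounts() { delete[] data; }
      void set(unsigned i, size_t v) { assert(i < length); data[i] = v; }
      size_t sum() const {
        size_t s = 0;
        for (unsigned i = 0; i < length; i++) s += data[i];
        return s;
      }
    };

    // A timing phase optionally owning a linked count array.
    struct TimedPhase {
      WorkerCounts* sub_count;
      TimedPhase() : sub_count(NULL) {}
      void link_sub_count(WorkerCounts* c) { sub_count = c; }
      size_t sub_count_sum() const {
        assert(sub_count != NULL);  // mirrors the "No sub count" assert
        return sub_count->sum();
      }
    };

    int main() {
      WorkerCounts processed_buffers(4);
      TimedPhase update_rs;
      update_rs.link_sub_count(&processed_buffers);
      for (unsigned i = 0; i < 4; i++) processed_buffers.set(i, 10 * (i + 1));
      std::printf("processed buffers: %zu\n", update_rs.sub_count_sum());
      return 0;
    }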
RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq); _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i); - - _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0); } void G1RemSet::cleanupHRRS() { diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp --- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp @@ -139,20 +139,17 @@ double table_fixup_start = os::elapsedTime(); G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id); - double queue_fixup_time_sec = (table_fixup_start - queue_fixup_start); - double table_fixup_time_sec = (os::elapsedTime() - table_fixup_start); + double queue_fixup_time_sec = table_fixup_start - queue_fixup_start; + double table_fixup_time_sec = os::elapsedTime() - table_fixup_start; G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); g1p->phase_times()->record_time(G1GCPhaseTimes::STR_DEDUP_QUEUE_FIXUP, worker_id, queue_fixup_time_sec); g1p->phase_times()->record_time(G1GCPhaseTimes::STR_DEDUP_TABLE_FIXUP, worker_id, table_fixup_time_sec); - g1p->phase_times()->record_string_dedup_queue_fixup_worker_time(worker_id, queue_fixup_time_sec * 1000.0); - g1p->phase_times()->record_string_dedup_table_fixup_worker_time(worker_id, table_fixup_time_sec * 1000.0); } }; void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, bool allow_resize_and_rehash) { assert(is_enabled(), "String deduplication not enabled"); G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); - g1p->phase_times()->note_string_dedup_fixup_start(); double fixup_start = os::elapsedTime(); G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash); @@ -163,7 +160,6 @@ double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0; g1p->phase_times()->record_string_dedup_fixup_time(fixup_time_ms); - g1p->phase_times()->note_string_dedup_fixup_end(); } void G1StringDedup::threads_do(ThreadClosure* tc) { # HG changeset patch # User brutisso # Date 1424774313 -3600 # Tue Feb 24 11:38:33 2015 +0100 # Node ID 319ed87fe9b8278438cce50548eb573cdd96f296 # Parent 6381f79fdd9bc65c221ce8bc9329bbbcb9ebeffc [mq]: rename diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -2210,7 +2210,7 @@ while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) { n_completed_buffers++; } - g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::UPDATE_RS, worker_i, n_completed_buffers); + g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers); dcqs.clear_n_completed_buffers(); assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!"); } @@ -4486,7 +4486,7 @@ void work(uint worker_id) { if (worker_id >= _n_workers) return; // no work needed this round - _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GC_WORKER_START, worker_id, os::elapsedTime()); + _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime()); { ResourceMark rm; @@ -4568,9 +4568,9 @@ evac.do_void(); double elapsed_sec = os::elapsedTime() - start; double term_sec = pss.term_time(); - 
_g1h->g1_policy()->phase_times()->add_time(G1GCPhaseTimes::OBJ_COPY, worker_id, elapsed_sec - term_sec); - _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::TERMINATION, worker_id, term_sec); - _g1h->g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::TERMINATION, worker_id, pss.term_attempts()); + _g1h->g1_policy()->phase_times()->add_time(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec); + _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::Termination, worker_id, term_sec); + _g1h->g1_policy()->phase_times()->record_sub_count(G1GCPhaseTimes::Termination, worker_id, pss.term_attempts()); } _g1h->g1_policy()->record_thread_age_table(pss.age_table()); _g1h->update_surviving_young_words(pss.surviving_young_words()+1); @@ -4586,7 +4586,7 @@ // destructors are executed here and are included as part of the // "GC Worker Time". } - _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GC_WORKER_END, worker_id, os::elapsedTime()); + _g1h->g1_policy()->phase_times()->record_time(G1GCPhaseTimes::GCWorkerEnd, worker_id, os::elapsedTime()); } }; @@ -4648,20 +4648,20 @@ double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds() + buf_scan_non_heap_weak_roots.closure_app_seconds(); - g1_policy()->phase_times()->record_time(G1GCPhaseTimes::OBJ_COPY, worker_i, obj_copy_time_sec); + g1_policy()->phase_times()->record_time(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec); double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec; - g1_policy()->phase_times()->record_time(G1GCPhaseTimes::EXT_ROOT_SCAN, worker_i, ext_root_time_sec); + g1_policy()->phase_times()->record_time(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec); // During conc marking we have to filter the per-thread SATB buffers // to make sure we remove any oops into the CSet (which will show up // as implicitly live). 
if (mark_in_progress()) { if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) { - G1GCPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATB_FILTERING, worker_i); + G1GCPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i); JavaThread::satb_mark_queue_set().filter_thread_buffers(); } else { - g1_policy()->phase_times()->record_time(G1GCPhaseTimes::SATB_FILTERING, worker_i, 0.0); + g1_policy()->phase_times()->record_time(G1GCPhaseTimes::SATBFiltering, worker_i, 0.0); } } @@ -5067,12 +5067,12 @@ virtual void work(uint worker_id) { G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times(); - G1GCPhaseTimesTracker x(timer, G1GCPhaseTimes::REDIRTY_CARDS, worker_id); + G1GCPhaseTimesTracker x(timer, G1GCPhaseTimes::RedirtyCards, worker_id); RedirtyLoggedCardTableEntryClosure cl; _queue->par_apply_closure_to_all_completed_buffers(&cl); - timer->record_sub_count(G1GCPhaseTimes::REDIRTY_CARDS, worker_id, cl.num_processed()); + timer->record_sub_count(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed()); } }; diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @@ -1076,7 +1076,7 @@ if (update_stats) { double cost_per_card_ms = 0.0; if (_pending_cards > 0) { - cost_per_card_ms = phase_times()->average_time(G1GCPhaseTimes::UPDATE_RS) / (double) _pending_cards; + cost_per_card_ms = phase_times()->average_time(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards; _cost_per_card_ms_seq->add(cost_per_card_ms); } @@ -1084,7 +1084,7 @@ double cost_per_entry_ms = 0.0; if (cards_scanned > 10) { - cost_per_entry_ms = phase_times()->average_time(G1GCPhaseTimes::SCAN_RS) / (double) cards_scanned; + cost_per_entry_ms = phase_times()->average_time(G1GCPhaseTimes::ScanRS) / (double) cards_scanned; if (_last_gc_was_young) { _cost_per_entry_ms_seq->add(cost_per_entry_ms); } else { @@ -1126,7 +1126,7 @@ double cost_per_byte_ms = 0.0; if (copied_bytes > 0) { - cost_per_byte_ms = phase_times()->average_time(G1GCPhaseTimes::OBJ_COPY) / (double) copied_bytes; + cost_per_byte_ms = phase_times()->average_time(G1GCPhaseTimes::ObjCopy) / (double) copied_bytes; if (_in_marking_window) { _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms); } else { @@ -1135,8 +1135,8 @@ } double all_other_time_ms = pause_time_ms - - (phase_times()->average_time(G1GCPhaseTimes::UPDATE_RS) + phase_times()->average_time(G1GCPhaseTimes::SCAN_RS) - + phase_times()->average_time(G1GCPhaseTimes::OBJ_COPY) + phase_times()->average_time(G1GCPhaseTimes::TERMINATION)); + (phase_times()->average_time(G1GCPhaseTimes::UpdateRS) + phase_times()->average_time(G1GCPhaseTimes::ScanRS) + + phase_times()->average_time(G1GCPhaseTimes::ObjCopy) + phase_times()->average_time(G1GCPhaseTimes::Termination)); double young_other_time_ms = 0.0; if (young_cset_region_length() > 0) { @@ -1177,8 +1177,8 @@ // Note that _mmu_tracker->max_gc_time() returns the time in seconds. 
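With SATB filtering optional at this point in the series, the policy only folds the SATB average into parallel_known_time when that phase is enabled. A small worked example with made-up numbers showing how "parallel other" falls out of the measured phases:

    #include <cstdio>

    // Worked example (illustrative numbers): "parallel other" is whatever part
    // of the parallel wall time the measured phases do not explain.
    int main() {
      double parallel_total_ms = 25.0;  // cur_collection_par_time_ms
      double ext_root_scan = 4.0, satb_filtering = 0.5, update_rs = 6.0,
             scan_rs = 5.0, obj_copy = 7.0, termination = 1.0;

      double parallel_known = ext_root_scan + satb_filtering + update_rs +
                              scan_rs + obj_copy + termination;      // 23.5 ms
      double parallel_other = parallel_total_ms - parallel_known;    // 1.5 ms
      std::printf("parallel other = %.1f ms\n", parallel_other);
      return 0;
    }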
double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0; - adjust_concurrent_refinement(phase_times()->average_time(G1GCPhaseTimes::UPDATE_RS), - phase_times()->sub_count_sum(G1GCPhaseTimes::UPDATE_RS), update_rs_time_goal_ms); + adjust_concurrent_refinement(phase_times()->average_time(G1GCPhaseTimes::UpdateRS), + phase_times()->sub_count_sum(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms); _collectionSetChooser->verify(); } @@ -2120,23 +2120,23 @@ _other.add(pause_time_ms - phase_times->accounted_time_ms()); _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms()); _parallel.add(phase_times->cur_collection_par_time_ms()); - _ext_root_scan.add(phase_times->average_time(G1GCPhaseTimes::EXT_ROOT_SCAN)); - if (phase_times->enabled(G1GCPhaseTimes::SATB_FILTERING)) { - _satb_filtering.add(phase_times->average_time(G1GCPhaseTimes::SATB_FILTERING)); + _ext_root_scan.add(phase_times->average_time(G1GCPhaseTimes::ExtRootScan)); + if (phase_times->enabled(G1GCPhaseTimes::SATBFiltering)) { + _satb_filtering.add(phase_times->average_time(G1GCPhaseTimes::SATBFiltering)); } - _update_rs.add(phase_times->average_time(G1GCPhaseTimes::UPDATE_RS)); - _scan_rs.add(phase_times->average_time(G1GCPhaseTimes::SCAN_RS)); - _obj_copy.add(phase_times->average_time(G1GCPhaseTimes::OBJ_COPY)); - _termination.add(phase_times->average_time(G1GCPhaseTimes::TERMINATION)); + _update_rs.add(phase_times->average_time(G1GCPhaseTimes::UpdateRS)); + _scan_rs.add(phase_times->average_time(G1GCPhaseTimes::ScanRS)); + _obj_copy.add(phase_times->average_time(G1GCPhaseTimes::ObjCopy)); + _termination.add(phase_times->average_time(G1GCPhaseTimes::Termination)); - double parallel_known_time = phase_times->average_time(G1GCPhaseTimes::EXT_ROOT_SCAN) + - phase_times->average_time(G1GCPhaseTimes::UPDATE_RS) + - phase_times->average_time(G1GCPhaseTimes::SCAN_RS) + - phase_times->average_time(G1GCPhaseTimes::OBJ_COPY) + - phase_times->average_time(G1GCPhaseTimes::TERMINATION); + double parallel_known_time = phase_times->average_time(G1GCPhaseTimes::ExtRootScan) + + phase_times->average_time(G1GCPhaseTimes::UpdateRS) + + phase_times->average_time(G1GCPhaseTimes::ScanRS) + + phase_times->average_time(G1GCPhaseTimes::ObjCopy) + + phase_times->average_time(G1GCPhaseTimes::Termination); - if (phase_times->enabled(G1GCPhaseTimes::SATB_FILTERING)) { - parallel_known_time += phase_times->average_time(G1GCPhaseTimes::SATB_FILTERING); + if (phase_times->enabled(G1GCPhaseTimes::SATBFiltering)) { + parallel_known_time += phase_times->average_time(G1GCPhaseTimes::SATBFiltering); } double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time; diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -219,30 +219,30 @@ { assert(max_gc_threads > 0, "Must have some GC threads"); - _gc_phases[GC_WORKER_START] = new WorkerDataArray(max_gc_threads, "GC Worker Start (ms)", false); - _gc_phases[EXT_ROOT_SCAN] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)"); - _gc_phases[SATB_FILTERING] = new WorkerDataArray(max_gc_threads, "SATB Filtering (ms)"); - _gc_phases[UPDATE_RS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)"); - _gc_phases[SCAN_RS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)"); - _gc_phases[CODE_ROOTS] = new 
WorkerDataArray(max_gc_threads, "Code Root Scanning (ms)"); - _gc_phases[OBJ_COPY] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)"); - _gc_phases[TERMINATION] = new WorkerDataArray(max_gc_threads, "Termination (ms)"); - _gc_phases[GC_WORKER_TOTAL] = new WorkerDataArray(max_gc_threads, "GC Worker Total (ms)"); - _gc_phases[GC_WORKER_END] = new WorkerDataArray(max_gc_threads, "GC Worker End (ms)", false); - _gc_phases[OTHER] = new WorkerDataArray(max_gc_threads, "GC Worker Other (ms)"); + _gc_phases[GCWorkerStart] = new WorkerDataArray(max_gc_threads, "GC Worker Start (ms)", false); + _gc_phases[ExtRootScan] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)"); + _gc_phases[SATBFiltering] = new WorkerDataArray(max_gc_threads, "SATB Filtering (ms)"); + _gc_phases[UpdateRS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)"); + _gc_phases[ScanRS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)"); + _gc_phases[CodeRoots] = new WorkerDataArray(max_gc_threads, "Code Root Scanning (ms)"); + _gc_phases[ObjCopy] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)"); + _gc_phases[Termination] = new WorkerDataArray(max_gc_threads, "Termination (ms)"); + _gc_phases[GCWorkerTotal] = new WorkerDataArray(max_gc_threads, "GC Worker Total (ms)"); + _gc_phases[GCWorkerEnd] = new WorkerDataArray(max_gc_threads, "GC Worker End (ms)", false); + _gc_phases[Other] = new WorkerDataArray(max_gc_threads, "GC Worker Other (ms)"); _update_rs_processed_buffers = new WorkerDataArray(max_gc_threads, "Processed Buffers"); - _gc_phases[UPDATE_RS]->link_sub_count_array(_update_rs_processed_buffers); + _gc_phases[UpdateRS]->link_sub_count_array(_update_rs_processed_buffers); _termination_attempts = new WorkerDataArray(max_gc_threads, "Termination Attempts"); - _gc_phases[TERMINATION]->link_sub_count_array(_termination_attempts); + _gc_phases[Termination]->link_sub_count_array(_termination_attempts); - _gc_phases[STR_DEDUP_QUEUE_FIXUP] = new WorkerDataArray(max_gc_threads, "Queue Fixup (ms)"); - _gc_phases[STR_DEDUP_TABLE_FIXUP] = new WorkerDataArray(max_gc_threads, "Table Fixup (ms)"); + _gc_phases[StringDedupQueueFixup] = new WorkerDataArray(max_gc_threads, "Queue Fixup (ms)"); + _gc_phases[StringDedupTableFixup] = new WorkerDataArray(max_gc_threads, "Table Fixup (ms)"); - _gc_phases[REDIRTY_CARDS] = new WorkerDataArray(max_gc_threads, "Parallel Redirty"); + _gc_phases[RedirtyCards] = new WorkerDataArray(max_gc_threads, "Parallel Redirty"); _redirtied_cards = new WorkerDataArray(max_gc_threads, "Redirtied Cards"); - _gc_phases[REDIRTY_CARDS]->link_sub_count_array(_redirtied_cards); + _gc_phases[RedirtyCards]->link_sub_count_array(_redirtied_cards); } void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) { @@ -259,35 +259,35 @@ } if (mark_in_progress) { - _gc_phases[SATB_FILTERING]->enable(); + _gc_phases[SATBFiltering]->enable(); } else { - _gc_phases[SATB_FILTERING]->disable(); + _gc_phases[SATBFiltering]->disable(); } if (!G1StringDedup::is_enabled()) { - _gc_phases[STR_DEDUP_QUEUE_FIXUP]->disable(); - _gc_phases[STR_DEDUP_TABLE_FIXUP]->disable(); + _gc_phases[StringDedupQueueFixup]->disable(); + _gc_phases[StringDedupTableFixup]->disable(); } } void G1GCPhaseTimes::note_gc_end() { for (uint i = 0; i < _active_gc_threads; i++) { - double worker_time = _gc_phases[GC_WORKER_END]->get(i) - _gc_phases[GC_WORKER_START]->get(i); - record_time(GC_WORKER_TOTAL, i , worker_time); + double worker_time = _gc_phases[GCWorkerEnd]->get(i) - _gc_phases[GCWorkerStart]->get(i); + 
record_time(GCWorkerTotal, i , worker_time); double worker_known_time = - _gc_phases[EXT_ROOT_SCAN]->get(i) + - _gc_phases[UPDATE_RS]->get(i) + - _gc_phases[SCAN_RS]->get(i) + - _gc_phases[CODE_ROOTS]->get(i) + - _gc_phases[OBJ_COPY]->get(i) + - _gc_phases[TERMINATION]->get(i); + _gc_phases[ExtRootScan]->get(i) + + _gc_phases[UpdateRS]->get(i) + + _gc_phases[ScanRS]->get(i) + + _gc_phases[CodeRoots]->get(i) + + _gc_phases[ObjCopy]->get(i) + + _gc_phases[Termination]->get(i); - if (_gc_phases[SATB_FILTERING]->enabled()) { - worker_known_time += _gc_phases[SATB_FILTERING]->get(i); + if (_gc_phases[SATBFiltering]->enabled()) { + worker_known_time += _gc_phases[SATBFiltering]->get(i); } - record_time(OTHER, i, worker_time - worker_known_time); + record_time(Other, i, worker_time - worker_known_time); } for (int i = 0; i < Sentinel; i++) { @@ -340,7 +340,7 @@ } print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads); - for (int i = 0; i <= _gc_main_phases_last; i++) { + for (int i = 0; i <= GCMainPhasesLast; i++) { if (_gc_phases[i]->enabled()) { _gc_phases[i]->print(2); } @@ -350,7 +350,7 @@ print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms); if (G1StringDedup::is_enabled()) { print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads); - for (int i = _string_dedup_phases_first; i <= _string_dedup_phases_last; i++) { + for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) { if (_gc_phases[i]->enabled()) { _gc_phases[i]->print(2); } @@ -379,7 +379,7 @@ print_stats(2, "Ref Enq", _cur_ref_enq_time_ms); print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms); if (G1Log::finest()) { - _gc_phases[REDIRTY_CARDS]->print(3); + _gc_phases[RedirtyCards]->print(3); } if (G1EagerReclaimHumongousObjects) { print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms); diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp @@ -135,33 +135,34 @@ public: enum GCPhases { - GC_WORKER_START, - EXT_ROOT_SCAN, - SATB_FILTERING, - UPDATE_RS, - SCAN_RS, - CODE_ROOTS, - OBJ_COPY, - TERMINATION, - OTHER, - GC_WORKER_TOTAL, - GC_WORKER_END, - STR_DEDUP_QUEUE_FIXUP, - STR_DEDUP_TABLE_FIXUP, - REDIRTY_CARDS, + GCWorkerStart, + ExtRootScan, + SATBFiltering, + UpdateRS, + ScanRS, + CodeRoots, + ObjCopy, + Termination, + Other, + GCWorkerTotal, + GCWorkerEnd, + StringDedupQueueFixup, + StringDedupTableFixup, + RedirtyCards, Sentinel }; private: + // Markers for grouping the phases in the GCPhases enum above + static const int GCMainPhasesLast = GCWorkerEnd; + static const int StringDedupPhasesFirst = StringDedupQueueFixup; + static const int StringDedupPhasesLast = StringDedupTableFixup; + WorkerDataArray* _gc_phases[Sentinel]; WorkerDataArray* _update_rs_processed_buffers; WorkerDataArray* _termination_attempts; WorkerDataArray* _redirtied_cards; - static const int _gc_main_phases_last = GC_WORKER_END; - static const int _string_dedup_phases_first = STR_DEDUP_QUEUE_FIXUP; - static const int _string_dedup_phases_last = STR_DEDUP_TABLE_FIXUP; - double _cur_collection_par_time_ms; double _cur_collection_code_root_fixup_time_ms; double _cur_strong_code_root_purge_time_ms; diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp --- 
a/src/share/vm/gc_implementation/g1/g1RemSet.cpp +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp @@ -248,8 +248,8 @@ assert(_cards_scanned != NULL, "invariant"); _cards_scanned[worker_i] = scanRScl.cards_done(); - _g1p->phase_times()->record_time(G1GCPhaseTimes::SCAN_RS, worker_i, scan_rs_time_sec); - _g1p->phase_times()->record_time(G1GCPhaseTimes::CODE_ROOTS, worker_i, scanRScl.strong_code_root_scan_time_sec()); + _g1p->phase_times()->record_time(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec); + _g1p->phase_times()->record_time(G1GCPhaseTimes::CodeRoots, worker_i, scanRScl.strong_code_root_scan_time_sec()); } // Closure used for updating RSets and recording references that @@ -286,7 +286,7 @@ }; void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) { - G1GCPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UPDATE_RS, worker_i); + G1GCPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i); // Apply the given closure to all remaining log entries. RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq); diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp --- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp @@ -142,8 +142,8 @@ double queue_fixup_time_sec = table_fixup_start - queue_fixup_start; double table_fixup_time_sec = os::elapsedTime() - table_fixup_start; G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy(); - g1p->phase_times()->record_time(G1GCPhaseTimes::STR_DEDUP_QUEUE_FIXUP, worker_id, queue_fixup_time_sec); - g1p->phase_times()->record_time(G1GCPhaseTimes::STR_DEDUP_TABLE_FIXUP, worker_id, table_fixup_time_sec); + g1p->phase_times()->record_time(G1GCPhaseTimes::StringDedupQueueFixup, worker_id, queue_fixup_time_sec); + g1p->phase_times()->record_time(G1GCPhaseTimes::StringDedupTableFixup, worker_id, table_fixup_time_sec); } }; # HG changeset patch # User brutisso # Date 1424859649 -3600 # Wed Feb 25 11:20:49 2015 +0100 # Node ID 45b00522de9e1611e1161e3218dcf58c1fb3f4a7 # Parent 319ed87fe9b8278438cce50548eb573cdd96f296 [mq]: levels diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -84,16 +84,8 @@ }; template -WorkerDataArray::WorkerDataArray(uint length, const char* title) : - _title(title), _length(0), _print_sum(true), _has_new_data(true), _sub_count(NULL), _enabled(true) { - assert(length > 0, "Must have some workers to store data for"); - _length = length; - _data = NEW_C_HEAP_ARRAY(T, _length, mtGC); -} - -template -WorkerDataArray::WorkerDataArray(uint length, const char* title, bool print_sum) : - _title(title), _length(0), _print_sum(print_sum), _has_new_data(true), _sub_count(NULL), _enabled(true) { +WorkerDataArray::WorkerDataArray(uint length, const char* title, bool print_sum, uint indent_level) : + _title(title), _length(0), _print_sum(print_sum), _indent_level(indent_level), _has_new_data(true), _sub_count(NULL), _enabled(true) { assert(length > 0, "Must have some workers to store data for"); _length = length; _data = NEW_C_HEAP_ARRAY(T, _length, mtGC); @@ -127,10 +119,10 @@ PRAGMA_DIAG_PUSH PRAGMA_FORMAT_NONLITERAL_IGNORED template -void WorkerDataArray::print(int level) { +void WorkerDataArray::print() { if (_length == 1) { // No need for min, max, average 
and sum for only one worker - LineBuffer buf(level); + LineBuffer buf(_indent_level); buf.append("[%s: ", _title); log_value(buf, _data[0]); buf.append_and_print_cr("]"); @@ -141,7 +133,7 @@ T max = _data[0]; T sum = 0; - LineBuffer buf(level); + LineBuffer buf(_indent_level); buf.append("[%s:", _title); for (uint i = 0; i < _length; ++i) { T val = _data[i]; @@ -176,17 +168,13 @@ buf.append_and_print_cr("]"); if (_sub_count != NULL && _sub_count->enabled()) { - _sub_count->print(level + 1); + _sub_count->print(); } } PRAGMA_DIAG_POP #ifndef PRODUCT -//template <> const int WorkerDataArray::_uninitialized = -1; -//template <> const double WorkerDataArray::_uninitialized = -1.0; -//template <> const size_t WorkerDataArray::_uninitialized = (size_t)-1; - template void WorkerDataArray::reset() { for (uint i = 0; i < _length; i++) { @@ -219,29 +207,29 @@ { assert(max_gc_threads > 0, "Must have some GC threads"); - _gc_phases[GCWorkerStart] = new WorkerDataArray(max_gc_threads, "GC Worker Start (ms)", false); - _gc_phases[ExtRootScan] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)"); - _gc_phases[SATBFiltering] = new WorkerDataArray(max_gc_threads, "SATB Filtering (ms)"); - _gc_phases[UpdateRS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)"); - _gc_phases[ScanRS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)"); - _gc_phases[CodeRoots] = new WorkerDataArray(max_gc_threads, "Code Root Scanning (ms)"); - _gc_phases[ObjCopy] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)"); - _gc_phases[Termination] = new WorkerDataArray(max_gc_threads, "Termination (ms)"); - _gc_phases[GCWorkerTotal] = new WorkerDataArray(max_gc_threads, "GC Worker Total (ms)"); - _gc_phases[GCWorkerEnd] = new WorkerDataArray(max_gc_threads, "GC Worker End (ms)", false); - _gc_phases[Other] = new WorkerDataArray(max_gc_threads, "GC Worker Other (ms)"); + _gc_phases[GCWorkerStart] = new WorkerDataArray(max_gc_threads, "GC Worker Start (ms)", false, 2); + _gc_phases[ExtRootScan] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)", true, 2); + _gc_phases[SATBFiltering] = new WorkerDataArray(max_gc_threads, "SATB Filtering (ms)", true, 2); + _gc_phases[UpdateRS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)", true, 2); + _gc_phases[ScanRS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)", true, 2); + _gc_phases[CodeRoots] = new WorkerDataArray(max_gc_threads, "Code Root Scanning (ms)", true, 2); + _gc_phases[ObjCopy] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)", true, 2); + _gc_phases[Termination] = new WorkerDataArray(max_gc_threads, "Termination (ms)", true, 2); + _gc_phases[GCWorkerTotal] = new WorkerDataArray(max_gc_threads, "GC Worker Total (ms)", true, 2); + _gc_phases[GCWorkerEnd] = new WorkerDataArray(max_gc_threads, "GC Worker End (ms)", false, 2); + _gc_phases[Other] = new WorkerDataArray(max_gc_threads, "GC Worker Other (ms)", true, 2); - _update_rs_processed_buffers = new WorkerDataArray(max_gc_threads, "Processed Buffers"); + _update_rs_processed_buffers = new WorkerDataArray(max_gc_threads, "Processed Buffers", true, 3); _gc_phases[UpdateRS]->link_sub_count_array(_update_rs_processed_buffers); - _termination_attempts = new WorkerDataArray(max_gc_threads, "Termination Attempts"); + _termination_attempts = new WorkerDataArray(max_gc_threads, "Termination Attempts", true, 3); _gc_phases[Termination]->link_sub_count_array(_termination_attempts); - _gc_phases[StringDedupQueueFixup] = new WorkerDataArray(max_gc_threads, "Queue Fixup (ms)"); - 
_gc_phases[StringDedupTableFixup] = new WorkerDataArray(max_gc_threads, "Table Fixup (ms)"); + _gc_phases[StringDedupQueueFixup] = new WorkerDataArray(max_gc_threads, "Queue Fixup (ms)", true, 2); + _gc_phases[StringDedupTableFixup] = new WorkerDataArray(max_gc_threads, "Table Fixup (ms)", true, 2); - _gc_phases[RedirtyCards] = new WorkerDataArray(max_gc_threads, "Parallel Redirty"); - _redirtied_cards = new WorkerDataArray(max_gc_threads, "Redirtied Cards"); + _gc_phases[RedirtyCards] = new WorkerDataArray(max_gc_threads, "Parallel Redirty", true, 3); + _redirtied_cards = new WorkerDataArray(max_gc_threads, "Redirtied Cards", true, 3); _gc_phases[RedirtyCards]->link_sub_count_array(_redirtied_cards); } @@ -342,7 +330,7 @@ print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads); for (int i = 0; i <= GCMainPhasesLast; i++) { if (_gc_phases[i]->enabled()) { - _gc_phases[i]->print(2); + _gc_phases[i]->print(); } } @@ -352,7 +340,7 @@ print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads); for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) { if (_gc_phases[i]->enabled()) { - _gc_phases[i]->print(2); + _gc_phases[i]->print(); } } } @@ -379,7 +367,7 @@ print_stats(2, "Ref Enq", _cur_ref_enq_time_ms); print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms); if (G1Log::finest()) { - _gc_phases[RedirtyCards]->print(3); + _gc_phases[RedirtyCards]->print(); } if (G1EagerReclaimHumongousObjects) { print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms); diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp @@ -35,6 +35,7 @@ uint _length; const char* _title; bool _print_sum; + uint _indent_level; bool _enabled; WorkerDataArray* _sub_count; @@ -52,8 +53,7 @@ void log_value(LineBuffer& buf, T value); public: - WorkerDataArray(uint length, const char* title); - WorkerDataArray(uint length, const char* title, bool print_sum); + WorkerDataArray(uint length, const char* title, bool print_sum, uint indent_level); ~WorkerDataArray(); @@ -107,7 +107,7 @@ return _sum; } - void print(int level); + void print(); void reset() PRODUCT_RETURN; void verify() PRODUCT_RETURN; # HG changeset patch # User brutisso # Date 1424859864 -3600 # Wed Feb 25 11:24:24 2015 +0100 # Node ID 5a0f4b493e83ae13460ac734b86bf17b604a6b12 # Parent 45b00522de9e1611e1161e3218dcf58c1fb3f4a7 [mq]: PRAGMA_FORMAT diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -116,8 +116,6 @@ return -3.0; } -PRAGMA_DIAG_PUSH -PRAGMA_FORMAT_NONLITERAL_IGNORED template void WorkerDataArray::print() { if (_length == 1) { @@ -171,7 +169,6 @@ _sub_count->print(); } } -PRAGMA_DIAG_POP #ifndef PRODUCT # HG changeset patch # User brutisso # Date 1424865203 -3600 # Wed Feb 25 12:53:23 2015 +0100 # Node ID 6b8c7d8d8fec2e4e8d76271ff5a7920fa9967b2f # Parent 5a0f4b493e83ae13460ac734b86bf17b604a6b12 [mq]: assertfix diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -186,8 +186,7 @@ void 
WorkerDataArray::verify() { for (uint i = 0; i < _length; i++) { assert(_data[i] != WorkerDataArray::uninitialized(), - err_msg("Invalid data for worker %u, data: %lf, uninitialized: %lf", - i, (double)_data[i], (double)WorkerDataArray::uninitialized())); + err_msg("Invalid data for worker %u in '%s'", i, _title)); } if (_sub_count != NULL) { _sub_count->verify(); # HG changeset patch # User brutisso # Date 1424868607 -3600 # Wed Feb 25 13:50:07 2015 +0100 # Node ID afa642a38f8e961400bb8bf3b3d399e45d7a9c96 # Parent 6b8c7d8d8fec2e4e8d76271ff5a7920fa9967b2f [mq]: fixdefaults diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -108,12 +108,12 @@ template <> size_t WorkerDataArray::uninitialized() { - return (size_t)-5; + return (size_t)-1; } template <> double WorkerDataArray::uninitialized() { - return -3.0; + return -1.0; } template # HG changeset patch # User brutisso # Date 1425040965 -3600 # Fri Feb 27 13:42:45 2015 +0100 # Node ID 156b733c58631b3b6e152832b1c6d4ab5e1d089b # Parent afa642a38f8e961400bb8bf3b3d399e45d7a9c96 [mq]: satbfix diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp @@ -2123,6 +2123,8 @@ _ext_root_scan.add(phase_times->average_time(G1GCPhaseTimes::ExtRootScan)); if (phase_times->enabled(G1GCPhaseTimes::SATBFiltering)) { _satb_filtering.add(phase_times->average_time(G1GCPhaseTimes::SATBFiltering)); + } else { + _satb_filtering.add(0.0); } _update_rs.add(phase_times->average_time(G1GCPhaseTimes::UpdateRS)); _scan_rs.add(phase_times->average_time(G1GCPhaseTimes::ScanRS)); # HG changeset patch # User brutisso # Date 1425041971 -3600 # Fri Feb 27 13:59:31 2015 +0100 # Node ID 874de78bc5346d309647e1458111c4284e94b793 # Parent 156b733c58631b3b6e152832b1c6d4ab5e1d089b [mq]: firstLogLevelAttempt diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -84,8 +84,9 @@ }; template -WorkerDataArray::WorkerDataArray(uint length, const char* title, bool print_sum, uint indent_level) : - _title(title), _length(0), _print_sum(print_sum), _indent_level(indent_level), _has_new_data(true), _sub_count(NULL), _enabled(true) { +WorkerDataArray::WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level) : + _title(title), _length(0), _print_sum(print_sum), _log_level(log_level), _indent_level(indent_level), + _has_new_data(true), _sub_count(NULL), _enabled(true) { assert(length > 0, "Must have some workers to store data for"); _length = length; _data = NEW_C_HEAP_ARRAY(T, _length, mtGC); @@ -170,16 +171,26 @@ } } +template +void WorkerDataArray::reset() { + if (_log_level <= G1Log::level()) { + enable(); + reset_array(); + } else { + disable(); + } + if (_sub_count != NULL) { + _sub_count->reset(); + } +} + #ifndef PRODUCT template -void WorkerDataArray::reset() { +void WorkerDataArray::reset_array() { for (uint i = 0; i < _length; i++) { _data[i] = WorkerDataArray::uninitialized(); } - if (_sub_count != NULL) { - _sub_count->reset(); - } } template @@ -203,29 +214,31 @@ { 
assert(max_gc_threads > 0, "Must have some GC threads"); - _gc_phases[GCWorkerStart] = new WorkerDataArray(max_gc_threads, "GC Worker Start (ms)", false, 2); - _gc_phases[ExtRootScan] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)", true, 2); - _gc_phases[SATBFiltering] = new WorkerDataArray(max_gc_threads, "SATB Filtering (ms)", true, 2); - _gc_phases[UpdateRS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)", true, 2); - _gc_phases[ScanRS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)", true, 2); - _gc_phases[CodeRoots] = new WorkerDataArray(max_gc_threads, "Code Root Scanning (ms)", true, 2); - _gc_phases[ObjCopy] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)", true, 2); - _gc_phases[Termination] = new WorkerDataArray(max_gc_threads, "Termination (ms)", true, 2); - _gc_phases[GCWorkerTotal] = new WorkerDataArray(max_gc_threads, "GC Worker Total (ms)", true, 2); - _gc_phases[GCWorkerEnd] = new WorkerDataArray(max_gc_threads, "GC Worker End (ms)", false, 2); - _gc_phases[Other] = new WorkerDataArray(max_gc_threads, "GC Worker Other (ms)", true, 2); + // Phases with G1Log::LevelNone are used for collecting statistics and need to be enabled + // independently of the the GC logging. + _gc_phases[GCWorkerStart] = new WorkerDataArray(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2); + _gc_phases[ExtRootScan] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelNone, 2); + _gc_phases[SATBFiltering] = new WorkerDataArray(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFiner, 2); + _gc_phases[UpdateRS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)", true, G1Log::LevelNone, 2); + _gc_phases[ScanRS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelNone, 2); + _gc_phases[CodeRoots] = new WorkerDataArray(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2); + _gc_phases[ObjCopy] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelNone, 2); + _gc_phases[Termination] = new WorkerDataArray(max_gc_threads, "Termination (ms)", true, G1Log::LevelNone, 2); + _gc_phases[GCWorkerTotal] = new WorkerDataArray(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2); + _gc_phases[GCWorkerEnd] = new WorkerDataArray(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2); + _gc_phases[Other] = new WorkerDataArray(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2); - _update_rs_processed_buffers = new WorkerDataArray(max_gc_threads, "Processed Buffers", true, 3); + _update_rs_processed_buffers = new WorkerDataArray(max_gc_threads, "Processed Buffers", true, G1Log::LevelNone, 3); _gc_phases[UpdateRS]->link_sub_count_array(_update_rs_processed_buffers); - _termination_attempts = new WorkerDataArray(max_gc_threads, "Termination Attempts", true, 3); + _termination_attempts = new WorkerDataArray(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3); _gc_phases[Termination]->link_sub_count_array(_termination_attempts); - _gc_phases[StringDedupQueueFixup] = new WorkerDataArray(max_gc_threads, "Queue Fixup (ms)", true, 2); - _gc_phases[StringDedupTableFixup] = new WorkerDataArray(max_gc_threads, "Table Fixup (ms)", true, 2); + _gc_phases[StringDedupQueueFixup] = new WorkerDataArray(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 2); + _gc_phases[StringDedupTableFixup] = new WorkerDataArray(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2); - _gc_phases[RedirtyCards] = new 
WorkerDataArray(max_gc_threads, "Parallel Redirty", true, 3); - _redirtied_cards = new WorkerDataArray(max_gc_threads, "Redirtied Cards", true, 3); + _gc_phases[RedirtyCards] = new WorkerDataArray(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFiner, 3); + _redirtied_cards = new WorkerDataArray(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFiner, 3); _gc_phases[RedirtyCards]->link_sub_count_array(_redirtied_cards); } @@ -238,13 +251,7 @@ _gc_phases[i]->reset(); } - if (!G1Log::finest()) { - _termination_attempts->disable(); - } - - if (mark_in_progress) { - _gc_phases[SATBFiltering]->enable(); - } else { + if (!mark_in_progress) { _gc_phases[SATBFiltering]->disable(); } diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp @@ -35,6 +35,7 @@ uint _length; const char* _title; bool _print_sum; + int _log_level; uint _indent_level; bool _enabled; @@ -53,7 +54,7 @@ void log_value(LineBuffer& buf, T value); public: - WorkerDataArray(uint length, const char* title, bool print_sum, uint indent_level); + WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level); ~WorkerDataArray(); @@ -62,10 +63,9 @@ } void set(uint worker_i, T value) { + assert(enabled(), err_msg("Logging value to disabled phase: %s", _title)); assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length)); - T ui = WorkerDataArray::uninitialized(); - double uii = -3.0; - assert(_data[worker_i] == ui, err_msg("Overwriting data for worker %d", worker_i)); + assert(_data[worker_i] == WorkerDataArray::uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title)); _data[worker_i] = value; _has_new_data = true; } @@ -81,12 +81,14 @@ } T get(uint worker_i) { + assert(enabled(), err_msg("Getting value from disabled phase: %s", _title)); assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length)); assert(_data[worker_i] != WorkerDataArray::uninitialized(), err_msg("No data to add to for worker %d", worker_i)); return _data[worker_i]; } void add(uint worker_i, T value) { + assert(enabled(), err_msg("Adding value to disabled phase: %s", _title)); assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length)); assert(_data[worker_i] != WorkerDataArray::uninitialized(), err_msg("No data to add to for worker %d", worker_i)); _data[worker_i] += value; @@ -109,12 +111,16 @@ void print(); - void reset() PRODUCT_RETURN; + void reset(); + void reset_array() PRODUCT_RETURN; void verify() PRODUCT_RETURN; void enable() { _enabled = true; } void disable() { _enabled = false; } bool enabled() { return _enabled; } + bool sub_count_enabled() { return _sub_count->enabled(); } + + int log_level() { return _log_level; } private: @@ -209,15 +215,21 @@ void print(double pause_time_sec); void record_time(GCPhases phase, uint worker_i, double time) { - _gc_phases[phase]->set(worker_i, time); + if (_gc_phases[phase]->enabled()) { + _gc_phases[phase]->set(worker_i, time); + } } void add_time(GCPhases phase, uint worker_i, double time) { - _gc_phases[phase]->add(worker_i, time); + if (_gc_phases[phase]->enabled()) { + _gc_phases[phase]->add(worker_i, time); + } } void record_sub_count(GCPhases phase, uint worker_i, size_t count) { - _gc_phases[phase]->set_sub_count(worker_i, count); + if 
+      _gc_phases[phase]->set_sub_count(worker_i, count);
+    }
   }

   void enable(GCPhases phase) {

diff --git a/src/share/vm/gc_implementation/g1/g1Log.hpp b/src/share/vm/gc_implementation/g1/g1Log.hpp
--- a/src/share/vm/gc_implementation/g1/g1Log.hpp
+++ b/src/share/vm/gc_implementation/g1/g1Log.hpp
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"

 class G1Log : public AllStatic {
+ public:
   typedef enum {
     LevelNone,
     LevelFine,
@@ -35,6 +36,7 @@
     LevelFinest
   } LogLevel;

+ private:
   static LogLevel _level;

  public:
@@ -50,6 +52,10 @@
     return _level == LevelFinest;
   }

+  static LogLevel level() {
+    return _level;
+  }
+
   static void init();
 };

# HG changeset patch
# User brutisso
# Date 1425046725 -3600
#      Fri Feb 27 15:18:45 2015 +0100
# Node ID 4bc8e231ed165c7a1a5540f40c81877fcfacdfae
# Parent  874de78bc5346d309647e1458111c4284e94b793
[mq]: thomasComments

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -4656,13 +4656,11 @@
   // During conc marking we have to filter the per-thread SATB buffers
   // to make sure we remove any oops into the CSet (which will show up
   // as implicitly live).
-  if (mark_in_progress()) {
-    if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
-      G1GCPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
-      JavaThread::satb_mark_queue_set().filter_thread_buffers();
-    } else {
-      g1_policy()->phase_times()->record_time(G1GCPhaseTimes::SATBFiltering, worker_i, 0.0);
-    }
+  if (mark_in_progress() && !_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
+    G1GCPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
+    JavaThread::satb_mark_queue_set().filter_thread_buffers();
+  } else {
+    g1_policy()->phase_times()->record_time(G1GCPhaseTimes::SATBFiltering, worker_i, 0.0);
   }

   // Now scan the complement of the collection set.
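The behavioral core of this series is the per-phase log level: WorkerDataArray::print() now returns early when the phase's configured level is above G1Log::level() or the phase is disabled. Below is a rough, standalone C++ sketch of that gating rule, not part of the patches; the names PhaseData and current_log_level are hypothetical stand-ins for the HotSpot types.

// Illustrative sketch only: models the early-return gating added to
// WorkerDataArray::print() with simplified, hypothetical types.
#include <cstdio>

enum LogLevel { LevelNone, LevelFine, LevelFiner, LevelFinest };

static LogLevel current_log_level = LevelFiner;   // stands in for G1Log::level()

struct PhaseData {
  const char* title;
  LogLevel    log_level;   // level required before this phase is printed
  bool        enabled;     // e.g. SATB filtering only while marking is in progress
  double      sum_ms;

  void print() const {
    // Skip output when the log level is too low or the phase is disabled,
    // mirroring the check "_log_level > G1Log::level() || !_enabled".
    if (log_level > current_log_level || !enabled) {
      return;
    }
    printf("  [%s: %.1lf]\n", title, sum_ms);
  }
};

int main() {
  PhaseData phases[] = {
    { "Update RS (ms)",       LevelFiner,  true,  3.2 },
    { "SATB Filtering (ms)",  LevelFiner,  false, 0.0 },  // disabled: not printed
    { "Termination Attempts", LevelFinest, true,  7.0 }   // needs finest: not printed
  };
  for (const PhaseData& p : phases) {
    p.print();
  }
  return 0;
}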
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -2121,26 +2121,19 @@
   _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
   _parallel.add(phase_times->cur_collection_par_time_ms());
   _ext_root_scan.add(phase_times->average_time(G1GCPhaseTimes::ExtRootScan));
-  if (phase_times->enabled(G1GCPhaseTimes::SATBFiltering)) {
-    _satb_filtering.add(phase_times->average_time(G1GCPhaseTimes::SATBFiltering));
-  } else {
-    _satb_filtering.add(0.0);
-  }
+  _satb_filtering.add(phase_times->average_time(G1GCPhaseTimes::SATBFiltering));
   _update_rs.add(phase_times->average_time(G1GCPhaseTimes::UpdateRS));
   _scan_rs.add(phase_times->average_time(G1GCPhaseTimes::ScanRS));
   _obj_copy.add(phase_times->average_time(G1GCPhaseTimes::ObjCopy));
   _termination.add(phase_times->average_time(G1GCPhaseTimes::Termination));

   double parallel_known_time = phase_times->average_time(G1GCPhaseTimes::ExtRootScan) +
+    phase_times->average_time(G1GCPhaseTimes::SATBFiltering) +
     phase_times->average_time(G1GCPhaseTimes::UpdateRS) +
     phase_times->average_time(G1GCPhaseTimes::ScanRS) +
     phase_times->average_time(G1GCPhaseTimes::ObjCopy) +
     phase_times->average_time(G1GCPhaseTimes::Termination);

-  if (phase_times->enabled(G1GCPhaseTimes::SATBFiltering)) {
-    parallel_known_time += phase_times->average_time(G1GCPhaseTimes::SATBFiltering);
-  }
-
   double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
   _parallel_other.add(parallel_other_time);
   _clear_ct.add(phase_times->cur_clear_ct_time_ms());

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -119,6 +119,10 @@

 template
 void WorkerDataArray::print() {
+  if (_log_level > G1Log::level() || !_enabled) {
+    return;
+  }
+
   if (_length == 1) {
     // No need for min, max, average and sum for only one worker
     LineBuffer buf(_indent_level);
@@ -166,31 +170,21 @@
   }
   buf.append_and_print_cr("]");

-  if (_sub_count != NULL && _sub_count->enabled()) {
+  if (_sub_count != NULL) {
     _sub_count->print();
   }
 }

-template
-void WorkerDataArray::reset() {
-  if (_log_level <= G1Log::level()) {
-    enable();
-    reset_array();
-  } else {
-    disable();
-  }
-  if (_sub_count != NULL) {
-    _sub_count->reset();
-  }
-}
-
 #ifndef PRODUCT
 template
-void WorkerDataArray::reset_array() {
+void WorkerDataArray::reset() {
   for (uint i = 0; i < _length; i++) {
     _data[i] = WorkerDataArray::uninitialized();
   }
+  if (_sub_count != NULL) {
+    _sub_count->reset();
+  }
 }

 template
@@ -214,21 +208,19 @@
 {
   assert(max_gc_threads > 0, "Must have some GC threads");

-  // Phases with G1Log::LevelNone are used for collecting statistics and need to be enabled
-  // independently of the GC logging.
   _gc_phases[GCWorkerStart] = new WorkerDataArray(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
-  _gc_phases[ExtRootScan] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelNone, 2);
+  _gc_phases[ExtRootScan] = new WorkerDataArray(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);
   _gc_phases[SATBFiltering] = new WorkerDataArray(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFiner, 2);
-  _gc_phases[UpdateRS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)", true, G1Log::LevelNone, 2);
-  _gc_phases[ScanRS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelNone, 2);
+  _gc_phases[UpdateRS] = new WorkerDataArray(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
+  _gc_phases[ScanRS] = new WorkerDataArray(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
   _gc_phases[CodeRoots] = new WorkerDataArray(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
-  _gc_phases[ObjCopy] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelNone, 2);
-  _gc_phases[Termination] = new WorkerDataArray(max_gc_threads, "Termination (ms)", true, G1Log::LevelNone, 2);
+  _gc_phases[ObjCopy] = new WorkerDataArray(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
+  _gc_phases[Termination] = new WorkerDataArray(max_gc_threads, "Termination (ms)", true, G1Log::LevelFiner, 2);
   _gc_phases[GCWorkerTotal] = new WorkerDataArray(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2);
   _gc_phases[GCWorkerEnd] = new WorkerDataArray(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2);
   _gc_phases[Other] = new WorkerDataArray(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2);

-  _update_rs_processed_buffers = new WorkerDataArray(max_gc_threads, "Processed Buffers", true, G1Log::LevelNone, 3);
+  _update_rs_processed_buffers = new WorkerDataArray(max_gc_threads, "Processed Buffers", true, G1Log::LevelFiner, 3);
   _gc_phases[UpdateRS]->link_sub_count_array(_update_rs_processed_buffers);

   _termination_attempts = new WorkerDataArray(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3);
@@ -237,8 +229,8 @@
   _gc_phases[StringDedupQueueFixup] = new WorkerDataArray(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 2);
   _gc_phases[StringDedupTableFixup] = new WorkerDataArray(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2);

-  _gc_phases[RedirtyCards] = new WorkerDataArray(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFiner, 3);
-  _redirtied_cards = new WorkerDataArray(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFiner, 3);
+  _gc_phases[RedirtyCards] = new WorkerDataArray(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
+  _redirtied_cards = new WorkerDataArray(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
   _gc_phases[RedirtyCards]->link_sub_count_array(_redirtied_cards);
 }

@@ -251,14 +243,10 @@
     _gc_phases[i]->reset();
   }

-  if (!mark_in_progress) {
-    _gc_phases[SATBFiltering]->disable();
-  }
+  _gc_phases[SATBFiltering]->set_enabled(mark_in_progress);

-  if (!G1StringDedup::is_enabled()) {
-    _gc_phases[StringDedupQueueFixup]->disable();
-    _gc_phases[StringDedupTableFixup]->disable();
-  }
+  _gc_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
+  _gc_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
 }

 void G1GCPhaseTimes::note_gc_end() {
@@ -268,23 +256,18 @@
     double worker_known_time = _gc_phases[ExtRootScan]->get(i) +
+      _gc_phases[SATBFiltering]->get(i) +
       _gc_phases[UpdateRS]->get(i) +
       _gc_phases[ScanRS]->get(i) +
       _gc_phases[CodeRoots]->get(i) +
       _gc_phases[ObjCopy]->get(i) +
       _gc_phases[Termination]->get(i);

-    if (_gc_phases[SATBFiltering]->enabled()) {
-      worker_known_time += _gc_phases[SATBFiltering]->get(i);
-    }
-
     record_time(Other, i, worker_time - worker_known_time);
   }

   for (int i = 0; i < Sentinel; i++) {
-    if (_gc_phases[i]->enabled()) {
-      _gc_phases[i]->verify();
-    }
+    _gc_phases[i]->verify();
   }
 }

@@ -332,9 +315,7 @@
   print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);

   for (int i = 0; i <= GCMainPhasesLast; i++) {
-    if (_gc_phases[i]->enabled()) {
-      _gc_phases[i]->print();
-    }
+    _gc_phases[i]->print();
   }

   print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
@@ -342,9 +323,7 @@
   if (G1StringDedup::is_enabled()) {
     print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
     for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
-      if (_gc_phases[i]->enabled()) {
-        _gc_phases[i]->print();
-      }
+      _gc_phases[i]->print();
     }
   }
   print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
@@ -369,9 +348,7 @@
   print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
   print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
   print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
-  if (G1Log::finest()) {
-    _gc_phases[RedirtyCards]->print();
-  }
+  _gc_phases[RedirtyCards]->print();
   if (G1EagerReclaimHumongousObjects) {
     print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
     if (G1Log::finest()) {

diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -63,7 +63,6 @@
   }

   void set(uint worker_i, T value) {
-    assert(enabled(), err_msg("Logging value to disabled phase: %s", _title));
     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
     assert(_data[worker_i] == WorkerDataArray::uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title));
     _data[worker_i] = value;
@@ -81,14 +80,12 @@
   }

   T get(uint worker_i) {
-    assert(enabled(), err_msg("Getting value from disabled phase: %s", _title));
     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
     assert(_data[worker_i] != WorkerDataArray::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
     return _data[worker_i];
   }

   void add(uint worker_i, T value) {
-    assert(enabled(), err_msg("Adding value to disabled phase: %s", _title));
     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
     assert(_data[worker_i] != WorkerDataArray::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
     _data[worker_i] += value;
@@ -111,21 +108,16 @@

   void print();

-  void reset();
-  void reset_array() PRODUCT_RETURN;
+  void reset() PRODUCT_RETURN;
   void verify() PRODUCT_RETURN;

-  void enable() { _enabled = true; }
-  void disable() { _enabled = false; }
-  bool enabled() { return _enabled; }
-  bool sub_count_enabled() { return _sub_count->enabled(); }
+  void set_enabled(bool enabled) { _enabled = enabled; }

   int log_level() { return _log_level; }

 private:

   void calculate_totals(){
-    assert(_enabled, "No data to base calculations on");
     _sum = (T)0;
     for (uint i = 0; i < _length; ++i) {
       _sum += _data[i];
@@ -215,33 +207,15 @@
   void print(double pause_time_sec);

   void record_time(GCPhases phase, uint worker_i, double time) {
-    if (_gc_phases[phase]->enabled()) {
-      _gc_phases[phase]->set(worker_i, time);
-    }
+    _gc_phases[phase]->set(worker_i, time);
   }

   void add_time(GCPhases phase, uint worker_i, double time) {
-    if (_gc_phases[phase]->enabled()) {
-      _gc_phases[phase]->add(worker_i, time);
-    }
+    _gc_phases[phase]->add(worker_i, time);
   }

   void record_sub_count(GCPhases phase, uint worker_i, size_t count) {
-    if (_gc_phases[phase]->sub_count_enabled()) {
-      _gc_phases[phase]->set_sub_count(worker_i, count);
-    }
-  }
-
-  void enable(GCPhases phase) {
-    _gc_phases[phase]->enable();
-  }
-
-  void disable(GCPhases phase) {
-    _gc_phases[phase]->disable();
-  }
-
-  bool enabled(GCPhases phase) {
-    return _gc_phases[phase]->enabled();
+    _gc_phases[phase]->set_sub_count(worker_i, count);
   }

   double average_time(GCPhases phase) {