/*
 * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP

#include "gc_interface/gcCause.hpp"

class LineBuffer;

// Fixed-size array with one slot per GC worker thread, used below to record
// per-worker phase timings (T = double, seconds) and auxiliary counts
// (T = size_t). Slots are debug-checked against double writes via the
// uninitialized() sentinel, and aggregate values (sum/min/max/average) are
// computed lazily and cached.
template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
  friend class G1GCPhasePrinter;
  T*          _data;         // one entry per worker thread
  uint        _length;       // number of slots (maximum worker count)
  const char* _title;        // label used when this phase is printed
  bool        _print_sum;    // whether the printed line includes the sum
  int         _log_level;    // log level at which this phase is printed
  uint        _indent_level; // indentation used by G1GCPhasePrinter
  bool        _enabled;      // when false, printing of this phase is suppressed

  // Optional per-worker counts attached to this phase (e.g. processed
  // buffers for UpdateRS). Not owned; only linked via link_sub_count_array().
  WorkerDataArray<size_t>* _sub_count;

  // Debug-only sentinel meaning "slot has not been written yet".
  NOT_PRODUCT(T uninitialized();)

  // We are caching the sum and average to only have to calculate them once.
  // This is not done in an MT-safe way. It is intended to allow single
  // threaded code to call sum() and average() multiple times in any order
  // without having to worry about the cost.
  bool _has_new_data;
  T _sum;
  T _min;
  T _max;
  double _average;

 public:
  WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level);

  ~WorkerDataArray();

  // Attach an auxiliary count array; ownership stays with the caller.
  void link_sub_count_array(WorkerDataArray<size_t>* sub_count) {
    _sub_count = sub_count;
  }

  WorkerDataArray<size_t>* sub_count() { return _sub_count; }

  // Record a value for one worker. Each slot may be written only once
  // between resets (debug-checked).
  void set(uint worker_i, T value) {
    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
    assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), err_msg("Overwriting data for worker %d in %s", worker_i, _title));
    _data[worker_i] = value;
    _has_new_data = true;
  }

  void set_sub_count(uint worker_i, size_t value) {
    assert(_sub_count != NULL, "No sub count");
    _sub_count->set(worker_i, value);
  }

  // Read the value recorded for one worker; the slot must have been set.
  T get(uint worker_i) {
    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
    // NOTE(review): assert message appears copy-pasted from add() below --
    // this is a read, nothing is added to here.
    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
    return _data[worker_i];
  }

  // Accumulate into an already-initialized slot.
  void add(uint worker_i, T value) {
    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
    _data[worker_i] += value;
    _has_new_data = true;
  }

  // The four accessors below lazily (re)compute the cached aggregates.
  // Single-threaded use only -- see the caching comment on _has_new_data.
  double average(){
    calculate_totals();
    return _average;
  }

  T sum() {
    calculate_totals();
    return _sum;
  }

  T minimum() {
    calculate_totals();
    return _min;
  }

  T maximum() {
    calculate_totals();
    return _max;
  }

  // Debug-only helpers (no-ops in product builds).
  void reset() PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;

  void set_enabled(bool enabled) { _enabled = enabled; }

  int log_level() { return _log_level; }

 private:

  // Recompute _sum/_min/_max/_average from _data if any slot changed since
  // the last call.
  // NOTE(review): assumes _length > 0 -- reads _data[0] unconditionally and
  // divides by _length; confirm constructors never pass length == 0.
  void calculate_totals(){
    if (!_has_new_data) {
      return;
    }

    _sum = (T)0;
    _min = _data[0];
    _max = _min;
    for (uint i = 0; i < _length; ++i) {
      T val = _data[i];
      _sum += val;
      _min = MIN2(_min, val);
      _max = MAX2(_max, val);
    }
    _average = (double)_sum / (double)_length;
    _has_new_data = false;
  }
};

// Collects all timing data for a single G1 evacuation pause: per-worker
// parallel phase times (in the _gc_phases arrays, recorded in seconds) plus
// a set of serial phase durations (recorded in milliseconds), and prints
// them for GC logging.
class G1GCPhaseTimes : public CHeapObj<mtGC> {
  friend class G1GCPhasePrinter;

  uint _active_gc_threads; // workers participating in the current pause
  uint _max_gc_threads;    // capacity of the per-worker arrays

 public:
  // Identifiers for the per-worker timed phases; Sentinel is the count,
  // not a real phase.
  enum GCPhases {
    GCWorkerStart,
    ExtRootScan,
    SATBFiltering,
    UpdateRS,
    ScanRS,
    CodeRoots,
    ObjCopy,
    Termination,
    Other,
    GCWorkerTotal,
    GCWorkerEnd,
    StringDedupQueueFixup,
    StringDedupTableFixup,
    RedirtyCards,
    Sentinel
  };

 private:
  // Markers for grouping the phases in the GCPhases enum above
  static const int GCMainPhasesLast = GCWorkerEnd;
  static const int StringDedupPhasesFirst = StringDedupQueueFixup;
  static const int StringDedupPhasesLast = StringDedupTableFixup;

  // Per-worker phase times in seconds, indexed by GCPhases.
  WorkerDataArray<double>* _gc_phases[Sentinel];
  // Auxiliary per-worker counts, linked as sub-counts of their phases.
  WorkerDataArray<size_t>* _update_rs_processed_buffers;
  WorkerDataArray<size_t>* _termination_attempts;
  WorkerDataArray<size_t>* _redirtied_cards;

  // Serial phase durations, all in milliseconds unless noted otherwise.
  double _cur_collection_par_time_ms;
  double _cur_collection_code_root_fixup_time_ms;
  double _cur_strong_code_root_purge_time_ms;

  double _cur_evac_fail_recalc_used;
  double _cur_evac_fail_restore_remsets;
  double _cur_evac_fail_remove_self_forwards;

  double _cur_string_dedup_fixup_time_ms;

  double _cur_clear_ct_time_ms;
  double _cur_ref_proc_time_ms;
  double _cur_ref_enq_time_ms;

  double _cur_collection_start_sec; // pause start timestamp, in seconds
  double _root_region_scan_wait_time_ms;

  double _recorded_young_cset_choice_time_ms;
  double _recorded_non_young_cset_choice_time_ms;

  double _recorded_redirty_logged_cards_time_ms;

  double _recorded_young_free_cset_time_ms;
  double _recorded_non_young_free_cset_time_ms;

  double _cur_fast_reclaim_humongous_time_ms;
  double _cur_fast_reclaim_humongous_register_time_ms;
  size_t _cur_fast_reclaim_humongous_total;
  size_t _cur_fast_reclaim_humongous_candidates;
  size_t _cur_fast_reclaim_humongous_reclaimed;

  double _cur_verify_before_time_ms;
  double _cur_verify_after_time_ms;

  // Helper methods for detailed logging
  void print_stats(int level, const char* str, double value);
  void print_stats(int level, const char* str, size_t value);
  void print_stats(int level, const char* str, double value, uint workers);

 public:
  G1GCPhaseTimes(uint max_gc_threads);
  // Prepare the per-worker arrays for a new pause with the given number of
  // active workers.
  void note_gc_start(uint active_gc_threads, bool mark_in_progress);
  void note_gc_end();
  void print(double pause_time_sec);

  // record the time a phase took in seconds
  void record_time_secs(GCPhases phase, uint worker_i, double secs) {
    _gc_phases[phase]->set(worker_i, secs);
  }

  // add a number of seconds to a phase
  void add_time_secs(GCPhases phase, uint worker_i, double secs) {
    _gc_phases[phase]->add(worker_i, secs);
  }

  // Record the auxiliary count attached to a phase (e.g. processed buffers).
  void record_sub_count(GCPhases phase, uint worker_i, size_t count) {
    _gc_phases[phase]->set_sub_count(worker_i, count);
  }

  // return the average time for a phase in milliseconds
  double average_time_ms(GCPhases phase) {
    return _gc_phases[phase]->average() * 1000.0;
  }

  // NOTE(review): duplicates the private sum_sub_count() below -- consider
  // consolidating.
  size_t sub_count_sum(GCPhases phase) {
    assert(_gc_phases[phase]->sub_count() != NULL, "No sub count");
    return _gc_phases[phase]->sub_count()->sum();
  }

 private:

  // Per-phase time accessors, converting the stored seconds to milliseconds.
  double get_time_ms(GCPhases phase, uint worker_i) {
    return _gc_phases[phase]->get(worker_i) * 1000.0;
  }

  double sum_time_ms(GCPhases phase) {
    return _gc_phases[phase]->sum() * 1000.0;
  }

  double min_time_ms(GCPhases phase) {
    return _gc_phases[phase]->minimum() * 1000.0;
  }

  double max_time_ms(GCPhases phase) {
    return _gc_phases[phase]->maximum() * 1000.0;
  }

  // Accessors for the auxiliary counts; all assert that a sub-count array
  // has been linked to the phase.
  size_t get_sub_count(GCPhases phase, uint worker_i) {
    assert(_gc_phases[phase]->sub_count() != NULL, "No sub count");
    return _gc_phases[phase]->sub_count()->get(worker_i);
  }

  size_t sum_sub_count(GCPhases phase) {
    assert(_gc_phases[phase]->sub_count() != NULL, "No sub count");
    return _gc_phases[phase]->sub_count()->sum();
  }

  double average_sub_count(GCPhases phase) {
    assert(_gc_phases[phase]->sub_count() != NULL, "No sub count");
    return _gc_phases[phase]->sub_count()->average();
  }

  size_t min_sub_count(GCPhases phase) {
    assert(_gc_phases[phase]->sub_count() != NULL, "No sub count");
    return _gc_phases[phase]->sub_count()->minimum();
  }

  size_t max_sub_count(GCPhases phase) {
    assert(_gc_phases[phase]->sub_count() != NULL, "No sub count");
    return _gc_phases[phase]->sub_count()->maximum();
  }

 public:

  // Simple setters for the serial phase durations, all in milliseconds
  // unless noted otherwise.
  void record_clear_ct_time(double ms) {
    _cur_clear_ct_time_ms = ms;
  }

  void record_par_time(double ms) {
    _cur_collection_par_time_ms = ms;
  }

  void record_code_root_fixup_time(double ms) {
    _cur_collection_code_root_fixup_time_ms = ms;
  }

  void record_strong_code_root_purge_time(double ms) {
    _cur_strong_code_root_purge_time_ms = ms;
  }

  void record_evac_fail_recalc_used_time(double ms) {
    _cur_evac_fail_recalc_used = ms;
  }

  void record_evac_fail_restore_remsets(double ms) {
    _cur_evac_fail_restore_remsets = ms;
  }

  void record_evac_fail_remove_self_forwards(double ms) {
    _cur_evac_fail_remove_self_forwards = ms;
  }

  void record_string_dedup_fixup_time(double ms) {
    _cur_string_dedup_fixup_time_ms = ms;
  }

  void record_ref_proc_time(double ms) {
    _cur_ref_proc_time_ms = ms;
  }

  void record_ref_enq_time(double ms) {
    _cur_ref_enq_time_ms = ms;
  }

  void record_root_region_scan_wait_time(double time_ms) {
    _root_region_scan_wait_time_ms = time_ms;
  }

  void record_young_free_cset_time_ms(double time_ms) {
    _recorded_young_free_cset_time_ms = time_ms;
  }

  void record_non_young_free_cset_time_ms(double time_ms) {
    _recorded_non_young_free_cset_time_ms = time_ms;
  }

  // Record the humongous-region fast-reclaim registration phase: its
  // duration plus how many humongous regions exist and how many are
  // reclaim candidates.
  void record_fast_reclaim_humongous_stats(double time_ms, size_t total, size_t candidates) {
    _cur_fast_reclaim_humongous_register_time_ms = time_ms;
    _cur_fast_reclaim_humongous_total = total;
    _cur_fast_reclaim_humongous_candidates = candidates;
  }

  void record_fast_reclaim_humongous_time_ms(double value, size_t reclaimed) {
    _cur_fast_reclaim_humongous_time_ms = value;
    _cur_fast_reclaim_humongous_reclaimed = reclaimed;
  }

  void record_young_cset_choice_time_ms(double time_ms) {
    _recorded_young_cset_choice_time_ms = time_ms;
  }

  void record_non_young_cset_choice_time_ms(double time_ms) {
    _recorded_non_young_cset_choice_time_ms = time_ms;
  }

  void record_redirty_logged_cards_time_ms(double time_ms) {
    _recorded_redirty_logged_cards_time_ms = time_ms;
  }

  // NOTE(review): despite the parameter name, this stores into
  // _cur_collection_start_sec (a seconds timestamp, per the "_sec" suffix
  // and cur_collection_start_sec() below) -- parameter is misnamed.
  void record_cur_collection_start_sec(double time_ms) {
    _cur_collection_start_sec = time_ms;
  }

  void record_verify_before_time_ms(double time_ms) {
    _cur_verify_before_time_ms = time_ms;
  }

  void record_verify_after_time_ms(double time_ms) {
    _cur_verify_after_time_ms = time_ms;
  }

  // Total of the individually recorded phase times (defined in the .cpp).
  double accounted_time_ms();

  // Plain getters for the recorded serial durations.
  double cur_collection_start_sec() {
    return _cur_collection_start_sec;
  }

  double cur_collection_par_time_ms() {
    return _cur_collection_par_time_ms;
  }

  double cur_clear_ct_time_ms() {
    return _cur_clear_ct_time_ms;
  }

  double root_region_scan_wait_time_ms() {
    return _root_region_scan_wait_time_ms;
  }

  double young_cset_choice_time_ms() {
    return _recorded_young_cset_choice_time_ms;
  }

  double young_free_cset_time_ms() {
    return _recorded_young_free_cset_time_ms;
  }

  double non_young_cset_choice_time_ms() {
    return _recorded_non_young_cset_choice_time_ms;
  }

  double non_young_free_cset_time_ms() {
    return _recorded_non_young_free_cset_time_ms;
  }

  double fast_reclaim_humongous_time_ms() {
    return _cur_fast_reclaim_humongous_time_ms;
  }
};

// Scoped (RAII) helper: constructed at the start of a phase, it records the
// phase's elapsed time for the given worker into the given G1GCPhaseTimes
// slot when destructed (see the .cpp for the ctor/dtor bodies).
class G1GCPhaseTimesTracker : public StackObj {
  double _start_time;
  G1GCPhaseTimes::GCPhases _phase;
  G1GCPhaseTimes* _phase_times;
  uint _worker_id;
 public:
  G1GCPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCPhases phase, uint worker_id);
  ~G1GCPhaseTimesTracker();
};

// Formats and prints one phase's worker data (and any linked sub-counts)
// through a LineBuffer; has friend access to the classes above.
class G1GCPhasePrinter : public StackObj {
  G1GCPhaseTimes* _phase_times;
  void print_single_length(G1GCPhaseTimes::GCPhases phase_id, WorkerDataArray<double>* phase);
  void print_multi_length(G1GCPhaseTimes::GCPhases phase_id, WorkerDataArray<double>* phase);
  void print_sub_count(G1GCPhaseTimes::GCPhases phase_id, WorkerDataArray<size_t>* sub_count);
  void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCPhases phase_id, WorkerDataArray<double>* phase);
  void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCPhases phase_id, WorkerDataArray<size_t>* phase);
 public:
  G1GCPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
  void print(G1GCPhaseTimes::GCPhases phase_id);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP