/*
 * Copyright (c) 2013, 2014 Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "memory/allocation.hpp"
#include "runtime/os.hpp"

// Helper class for avoiding interleaved logging
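// The line is assembled in a local buffer and emitted with a single
// print_cr() call rather than piece by piece, which is what keeps output
// from different threads from interleaving mid-line.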
class LineBuffer: public StackObj {

private:
  static const int BUFFER_LEN = 1024;
  static const int INDENT_CHARS = 3;
  char _buffer[BUFFER_LEN];
  int _indent_level;
  int _cur;

  void vappend(const char* format, va_list ap)  ATTRIBUTE_PRINTF(2, 0) {
    int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
    if (res != -1 && res < BUFFER_LEN - _cur) {
      _cur += res;
    } else {
      // The output did not fit: a C99-conforming vsnprintf returns the
      // untruncated length, some implementations return -1. Terminate the
      // buffer and mark it as full so later calls write nothing.
      DEBUG_ONLY(warning("buffer too small in LineBuffer");)
      _buffer[BUFFER_LEN - 1] = 0;
      _cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
    }
  }

public:
  explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
    for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
      _buffer[_cur] = ' ';
    }
  }

#ifndef PRODUCT
  ~LineBuffer() {
    assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
  }
#endif

  void append(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
  }

  void print_cr() {
    gclog_or_tty->print_cr("%s", _buffer);
    _cur = _indent_level * INDENT_CHARS;
  }

  void append_and_print_cr(const char* format, ...)  ATTRIBUTE_PRINTF(2, 3) {
    va_list ap;
    va_start(ap, format);
    vappend(format, ap);
    va_end(ap);
    print_cr();
  }
};

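// Stores one value of type T per worker thread for a single phase of the
// pause (times in seconds or raw counts), together with the attributes used
// when the phase is reported (title, whether the sum is printed, log level
// and indentation). Sum, min, max and average are computed lazily and
// cached; see calculate_totals() below.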
template <class T>
class WorkerDataArray : public CHeapObj<mtGC> {
  friend class G1GCParPhasePrinter;
  T*          _data;
  uint        _length;
  const char* _title;
  bool        _print_sum;
  int         _log_level;
  uint        _indent_level;
  bool        _enabled;

  WorkerDataArray<size_t>* _thread_work_items;

  NOT_PRODUCT(T uninitialized();)

  // We are caching the sum and average to only have to calculate them once.
  // This is not done in an MT-safe way. It is intended to allow single
  // threaded code to call sum() and average() multiple times in any order
  // without having to worry about the cost.
  bool   _has_new_data;
  T      _sum;
  T      _min;
  T      _max;
  double _average;

 public:
  WorkerDataArray(uint length, const char* title, bool print_sum, int log_level, uint indent_level) :
    _title(title), _length(0), _print_sum(print_sum), _log_level(log_level), _indent_level(indent_level),
    _has_new_data(true), _thread_work_items(NULL), _enabled(true) {
    assert(length > 0, "Must have some workers to store data for");
    _length = length;
    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
  }

  ~WorkerDataArray() {
    FREE_C_HEAP_ARRAY(T, _data);
  }

  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
    _thread_work_items = thread_work_items;
  }

  WorkerDataArray<size_t>* thread_work_items() { return _thread_work_items; }

  void set(uint worker_i, T value) {
    assert(worker_i < _length, err_msg("Worker %u is greater than max: %u", worker_i, _length));
    assert(_data[worker_i] == WorkerDataArray<T>::uninitialized(), err_msg("Overwriting data for worker %u in %s", worker_i, _title));
    _data[worker_i] = value;
    _has_new_data = true;
  }

  void set_thread_work_item(uint worker_i, size_t value) {
    assert(_thread_work_items != NULL, "No sub count");
    _thread_work_items->set(worker_i, value);
  }

  T get(uint worker_i) {
    assert(worker_i < _length, err_msg("Worker %u is greater than max: %u", worker_i, _length));
    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %u", worker_i));
    return _data[worker_i];
  }

  void add(uint worker_i, T value) {
    assert(worker_i < _length, err_msg("Worker %u is greater than max: %u", worker_i, _length));
    assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %u", worker_i));
    _data[worker_i] += value;
    _has_new_data = true;
  }

  double average() {
    calculate_totals();
    return _average;
  }

  T sum() {
    calculate_totals();
    return _sum;
  }

  T minimum() {
    calculate_totals();
    return _min;
  }

  T maximum() {
    calculate_totals();
    return _max;
  }

  void reset() PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;

  void set_enabled(bool enabled) { _enabled = enabled; }

  int log_level() { return _log_level; }

 private:

  void calculate_totals() {
    if (!_has_new_data) {
      return;
    }

    _sum = (T)0;
    _min = _data[0];
    _max = _min;
    for (uint i = 0; i < _length; ++i) {
      T val = _data[i];
      _sum += val;
      _min = MIN2(_min, val);
      _max = MAX2(_max, val);
    }
    _average = (double)_sum / (double)_length;
    _has_new_data = false;
  }
};


#ifndef PRODUCT

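// Sentinel values returned by uninitialized(). In debug builds the asserts in
// set(), get(), add() and verify() use them to catch entries that are read
// before being set, or set twice.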
template <>
size_t WorkerDataArray<size_t>::uninitialized() {
  return (size_t)-1;
}

template <>
double WorkerDataArray<double>::uninitialized() {
  return -1.0;
}

template <class T>
void WorkerDataArray<T>::reset() {
  for (uint i = 0; i < _length; i++) {
    _data[i] = WorkerDataArray<T>::uninitialized();
  }
  if (_thread_work_items != NULL) {
    _thread_work_items->reset();
  }
}

template <class T>
void WorkerDataArray<T>::verify() {
  if (!_enabled) {
    return;
  }

  for (uint i = 0; i < _length; i++) {
    assert(_data[i] != WorkerDataArray<T>::uninitialized(),
        err_msg("Invalid data for worker %u in '%s'", i, _title));
  }
  if (_thread_work_items != NULL) {
    _thread_work_items->verify();
  }
}

#endif

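// Allocates one WorkerDataArray per parallel phase, sized for the maximum
// number of GC threads. The constructor arguments give each phase its log
// title, whether a sum is meaningful, the G1Log level at which it is printed,
// and its indentation in the log output.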
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
  _max_gc_threads(max_gc_threads)
{
  assert(max_gc_threads > 0, "Must have some GC threads");

  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms)", false, G1Log::LevelFiner, 2);
  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms)", true, G1Log::LevelFiner, 2);

  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers", true, G1Log::LevelFiner, 3);
  _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers);

  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts", true, G1Log::LevelFinest, 3);
  _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);

  _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms)", true, G1Log::LevelFiner, 2);
  _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms)", true, G1Log::LevelFiner, 2);

  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
  _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
}

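// Called at the start of a pause: resets all per-phase arrays and disables
// the phases that do not apply to this pause (SATB filtering only runs while
// marking is in progress, the string dedup fixup phases only when dedup is
// enabled).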
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {
  assert(active_gc_threads > 0, "The number of threads must be > 0");
  assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
  _active_gc_threads = active_gc_threads;

  for (int i = 0; i < GCParPhasesSentinel; i++) {
    _gc_par_phases[i]->reset();
  }

  _gc_par_phases[SATBFiltering]->set_enabled(mark_in_progress);

  _gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
  _gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
}

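// Called at the end of a pause: derives, for each active worker, the total
// worker time (end time minus start time) and the "Other" time (total minus
// the sum of the individually timed phases), then verifies in debug builds
// that every enabled phase has data for every worker.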
void G1GCPhaseTimes::note_gc_end() {
  for (uint i = 0; i < _active_gc_threads; i++) {
    double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
    record_time_secs(GCWorkerTotal, i, worker_time);

    double worker_known_time =
        _gc_par_phases[ExtRootScan]->get(i) +
        _gc_par_phases[SATBFiltering]->get(i) +
        _gc_par_phases[UpdateRS]->get(i) +
        _gc_par_phases[ScanRS]->get(i) +
        _gc_par_phases[CodeRoots]->get(i) +
        _gc_par_phases[ObjCopy]->get(i) +
        _gc_par_phases[Termination]->get(i);

    record_time_secs(Other, i, worker_time - worker_known_time);
  }

  for (int i = 0; i < GCParPhasesSentinel; i++) {
    _gc_par_phases[i]->verify();
  }
}

void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
}

void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
  LineBuffer(level).append_and_print_cr("[%s: " SIZE_FORMAT "]", str, value);
}

void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
  LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
}

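// Returns the portion of the pause that is reported under its own heading in
// the log. print() subtracts this from the total pause time to compute the
// "Other" line.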
double G1GCPhaseTimes::accounted_time_ms() {
  // Root region scanning wait time. It's initialized to zero at the
  // start of the pause.
  double misc_time_ms = _root_region_scan_wait_time_ms;

  misc_time_ms += _cur_collection_par_time_ms;

  // Time taken to fix up roots in generated code
  misc_time_ms += _cur_collection_code_root_fixup_time_ms;

  // Strong code root purge time
  misc_time_ms += _cur_strong_code_root_purge_time_ms;

  if (G1StringDedup::is_enabled()) {
    // String dedup fixup time
    misc_time_ms += _cur_string_dedup_fixup_time_ms;
  }

  // Time taken to clean the card table
  misc_time_ms += _cur_clear_ct_time_ms;

  return misc_time_ms;
}

// record the time a phase took in seconds
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->set(worker_i, secs);
}

// add a number of seconds to a phase
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->add(worker_i, secs);
}

void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count) {
  _gc_par_phases[phase]->set_thread_work_item(worker_i, count);
}

// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->average() * 1000.0;
}

double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
  return _gc_par_phases[phase]->get(worker_i) * 1000.0;
}

double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->sum() * 1000.0;
}

double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->minimum() * 1000.0;
}

double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->maximum() * 1000.0;
}

size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
}

size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->sum();
}

double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->average();
}

size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->minimum();
}

size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
  assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items()->maximum();
}

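// Prints one parallel phase, honoring its log level and enabled flag. A phase
// with a single worker is printed as one value; otherwise the per-worker
// values (at the finest log level) plus Min/Avg/Max/Diff and, where
// meaningful, Sum are printed, followed by any linked thread work items.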
class G1GCParPhasePrinter : public StackObj {
  G1GCPhaseTimes* _phase_times;
 public:
  G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}

  void print(G1GCPhaseTimes::GCParPhases phase_id) {
    WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];

    if (phase->_log_level > G1Log::level() || !phase->_enabled) {
      return;
    }

    if (phase->_length == 1) {
      print_single_length(phase_id, phase);
    } else {
      print_multi_length(phase_id, phase);
    }
  }

 private:

  void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
    // No need for min, max, average and sum for only one worker
    LineBuffer buf(phase->_indent_level);
    buf.append_and_print_cr("[%s:  %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));

    if (phase->_thread_work_items != NULL) {
      LineBuffer buf2(phase->_thread_work_items->_indent_level);
      buf2.append_and_print_cr("[%s:  " SIZE_FORMAT "]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));
    }
  }

  void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
    for (uint i = 0; i < phase->_length; ++i) {
      buf.append("  %.1lf", _phase_times->get_time_ms(phase_id, i));
    }
    buf.print_cr();
  }

  void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
    for (uint i = 0; i < thread_work_items->_length; ++i) {
      buf.append("  " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
    }
    buf.print_cr();
  }

  void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
    LineBuffer buf(thread_work_items->_indent_level);
    buf.append("[%s:", thread_work_items->_title);

    if (G1Log::finest()) {
      print_count_values(buf, phase_id, thread_work_items);
    }

    assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));

    buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
        _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
        _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
  }

  void print_multi_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
    LineBuffer buf(phase->_indent_level);
    buf.append("[%s:", phase->_title);

    if (G1Log::finest()) {
      print_time_values(buf, phase_id, phase);
    }

    buf.append(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf",
        _phase_times->min_time_ms(phase_id), _phase_times->average_time_ms(phase_id), _phase_times->max_time_ms(phase_id),
        _phase_times->max_time_ms(phase_id) - _phase_times->min_time_ms(phase_id));

    if (phase->_print_sum) {
      // for things like the start and end times the sum is not
      // that relevant
      buf.append(", Sum: %.1lf", _phase_times->sum_time_ms(phase_id));
    }

    buf.append_and_print_cr("]");

    if (phase->_thread_work_items != NULL) {
      print_thread_work_items(phase_id, phase->_thread_work_items);
    }
  }
};

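// Prints the full phase breakdown for a pause. The "Other" line is the pause
// time that is not covered by accounted_time_ms(); sub-items that are zero or
// not applicable (verification, heap expansion, evacuation failure handling)
// are omitted.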
void G1GCPhaseTimes::print(double pause_time_sec) {
  G1GCParPhasePrinter par_phase_printer(this);

  if (_root_region_scan_wait_time_ms > 0.0) {
    print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  }

  print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
  for (int i = 0; i <= GCMainParPhasesLast; i++) {
    par_phase_printer.print((GCParPhases) i);
  }

  print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
  print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
  if (G1StringDedup::is_enabled()) {
    print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
    for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
      par_phase_printer.print((GCParPhases) i);
    }
  }
  print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
  double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
  print_stats(1, "Other", misc_time_ms);
  if (_cur_verify_before_time_ms > 0.0) {
    print_stats(2, "Verify Before", _cur_verify_before_time_ms);
  }
  if (_cur_expand_heap_time_ms > 0.0) {
    print_stats(2, "Expand Heap", _cur_expand_heap_time_ms);
  }
  if (G1CollectedHeap::heap()->evacuation_failed()) {
    double evac_fail_handling = _cur_evac_fail_recalc_used + _cur_evac_fail_remove_self_forwards +
      _cur_evac_fail_restore_remsets;
    print_stats(2, "Evacuation Failure", evac_fail_handling);
    if (G1Log::finest()) {
      print_stats(3, "Recalculate Used", _cur_evac_fail_recalc_used);
      print_stats(3, "Remove Self Forwards", _cur_evac_fail_remove_self_forwards);
      print_stats(3, "Restore RemSet", _cur_evac_fail_restore_remsets);
    }
  }
  print_stats(2, "Choose CSet",
    (_recorded_young_cset_choice_time_ms +
    _recorded_non_young_cset_choice_time_ms));
  print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
  print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
  print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
  par_phase_printer.print(RedirtyCards);
  if (G1EagerReclaimHumongousObjects) {
    print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
    if (G1Log::finest()) {
      print_stats(3, "Humongous Total", _cur_fast_reclaim_humongous_total);
      print_stats(3, "Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
    }
    print_stats(2, "Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
    if (G1Log::finest()) {
      print_stats(3, "Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
    }
  }
  print_stats(2, "Free CSet",
    (_recorded_young_free_cset_time_ms +
    _recorded_non_young_free_cset_time_ms));
  if (G1Log::finest()) {
    print_stats(3, "Young Free CSet", _recorded_young_free_cset_time_ms);
    print_stats(3, "Non-Young Free CSet", _recorded_non_young_free_cset_time_ms);
  }
  if (_cur_verify_after_time_ms > 0.0) {
    print_stats(2, "Verify After", _cur_verify_after_time_ms);
  }
}

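// Scoped (RAII) timer: the constructor records the start time and the
// destructor records the elapsed wall-clock time for the given phase and
// worker. A NULL phase_times pointer disables the tracker. Typical use inside
// a worker task looks roughly like this (sketch, identifiers are illustrative):
//
//   {
//     G1GCParPhaseTimesTracker t(phase_times, G1GCPhaseTimes::ObjCopy, worker_id);
//     ... do the work for this phase ...
//   } // elapsed time is recorded here via record_time_secs()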
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
    _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
  if (_phase_times != NULL) {
    _start_time = os::elapsedTime();
  }
}

G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
  if (_phase_times != NULL) {
    _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
  }
}