< prev index next >

src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp

Print this page
rev 7792 : 8149347: G1: guarantee fails with UseDynamicNumberOfGCThreads
Reviewed-by: poonam, kevinw


 137   }
 138 
  // --- WorkerDataArray<T> accessors (pre-JDK-8149347 listing) ---

  // Store `value` as the per-thread work-item count for `worker_i`.
  // Only valid if this array was created with a thread-work-item sub-array.
 139   void set_thread_work_item(uint worker_i, size_t value) {
 140     assert(_thread_work_items != NULL, "No sub count");
 141     _thread_work_items->set(worker_i, value);
 142   }
 143 
  // Return the value recorded for `worker_i`; asserts the slot was set,
  // i.e. does not still hold the uninitialized() sentinel.
  // NOTE(review): err_msg uses %d for uint arguments; %u would be the
  // matching conversion specifier -- confirm against err_msg conventions.
 144   T get(uint worker_i) {
 145     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 146     assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %d", worker_i));
 147     return _data[worker_i];
 148   }
 149 
  // Accumulate `value` into an already-initialized slot and mark the cached
  // totals (sum/min/max/average) as stale.
 150   void add(uint worker_i, T value) {
 151     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 152     assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
 153     _data[worker_i] += value;
 154     _has_new_data = true;
 155   }
 156 
  // Aggregate getters: each refreshes the cached totals first, then returns
  // the cached field.  NOTE(review): in this pre-fix version the totals are
  // computed over all _length slots -- see calculate_totals() below.
 157   double average(){
 158     calculate_totals();
 159     return _average;
 160   }
 161 
 162   T sum() {
 163     calculate_totals();
 164     return _sum;
 165   }
 166 
 167   T minimum() {
 168     calculate_totals();
 169     return _min;
 170   }
 171 
 172   T maximum() {
 173     calculate_totals();
 174     return _max;
 175   }
 176 
  // No-ops in PRODUCT builds; debug bodies appear below under #ifndef PRODUCT.
 177   void reset() PRODUCT_RETURN;
 178   void verify() PRODUCT_RETURN;
 179 
  // Disabled arrays are skipped by verify() and by the phase printer.
 180   void set_enabled(bool enabled) { _enabled = enabled; }
 181 
  // G1Log level at which this phase is printed.
 182   int log_level() { return _log_level;  }
 184  private:
 185 
  // Recompute _sum/_min/_max/_average; results are cached until set()/add()
  // raises _has_new_data again.
 186   void calculate_totals(){
 187     if (!_has_new_data) {
 188       return;
 189     }
 190 
 191     _sum = (T)0;
 192     _min = _data[0];
 193     _max = _min;
  // BUG (JDK-8149347, fixed by the revision shown in this page's second
  // listing): the loop runs over _length -- the maximum worker count -- so
  // with UseDynamicNumberOfGCThreads it also folds in slots still holding
  // the uninitialized() sentinel ((size_t)-1 / -1.0).
 194     for (uint i = 0; i < _length; ++i) {

 195       T val = _data[i];
 196       _sum += val;
 197       _min = MIN2(_min, val);
 198       _max = MAX2(_max, val);
 199     }
 200     _average = (double)_sum / (double)_length;
 201     _has_new_data = false;
 202   }
 203 };
 204 
 205 
 206 #ifndef PRODUCT
 207 
  // Sentinel value marking a slot that no worker has written yet.
 208 template <>
 209 size_t WorkerDataArray<size_t>::uninitialized() {
 210   return (size_t)-1;
 211 }
 212 
 213 template <>
 214 double WorkerDataArray<double>::uninitialized() {
 215   return -1.0;
 216 }
 217 
  // Reset every slot (and any thread-work-item sub-array) to the sentinel.
 218 template <class T>
 219 void WorkerDataArray<T>::reset() {
 220   for (uint i = 0; i < _length; i++) {
 221     _data[i] = WorkerDataArray<T>::uninitialized();
 222   }
 223   if (_thread_work_items != NULL) {
 224     _thread_work_items->reset();
 225   }
 226 }
 227 
  // Debug-only check that every slot was written during the pause.
  // NOTE(review): this pre-fix version walks all _length slots, so the
  // assert fires when fewer than the maximum number of GC threads were
  // active -- the failure mode named in JDK-8149347 (see page header).
 228 template <class T>
 229 void WorkerDataArray<T>::verify() {
 230   if (!_enabled) {
 231     return;
 232   }
 233 
 234   for (uint i = 0; i < _length; i++) {

 235     assert(_data[i] != WorkerDataArray<T>::uninitialized(),
 236         err_msg("Invalid data for worker %u in '%s'", i, _title));
 237   }
 238   if (_thread_work_items != NULL) {
 239     _thread_work_items->verify();
 240   }
 241 }
 242 
 243 #endif
 244 
  // Allocate one WorkerDataArray per parallel phase, sized for the maximum
  // number of GC threads (the per-pause active count can be smaller).
 245 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
 246   _max_gc_threads(max_gc_threads)
 247 {
 248   assert(max_gc_threads > 0, "Must have some GC threads");
 249 
 250   _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
 251   _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);
 252 
 253   // Root scanning phases
 254   _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms)", true, G1Log::LevelFinest, 3);
 255   _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms)", true, G1Log::LevelFinest, 3);
 256   _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms)", true, G1Log::LevelFinest, 3);
 257   _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms)", true, G1Log::LevelFinest, 3);
 258   _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms)", true, G1Log::LevelFinest, 3);
 259   _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms)", true, G1Log::LevelFinest, 3);


  // (Webrev listing elides original lines 260-303: the remaining phase arrays.)
 304 }
 305 
  // Derive per-worker totals at the end of a GC pause:
  // GCWorkerTotal = end - start, and Other = total minus the sum of the
  // individually timed phases; then verify all phase arrays.
 306 void G1GCPhaseTimes::note_gc_end() {
 307   for (uint i = 0; i < _active_gc_threads; i++) {
 308     double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
 309     record_time_secs(GCWorkerTotal, i , worker_time);
 310 
 311     double worker_known_time =
 312         _gc_par_phases[ExtRootScan]->get(i) +
 313         _gc_par_phases[SATBFiltering]->get(i) +
 314         _gc_par_phases[UpdateRS]->get(i) +
 315         _gc_par_phases[ScanRS]->get(i) +
 316         _gc_par_phases[CodeRoots]->get(i) +
 317         _gc_par_phases[ObjCopy]->get(i) +
 318         _gc_par_phases[Termination]->get(i);
 319 
 320     record_time_secs(Other, i, worker_time - worker_known_time);
 321   }
 322 
  // NOTE(review): pre-fix verify() takes no thread count and checks all
  // _length slots even though only _active_gc_threads were recorded above.
 323   for (int i = 0; i < GCParPhasesSentinel; i++) {
 324     _gc_par_phases[i]->verify();
 325   }
 326 }
 327 
  // Single-line "[<str>: <value>]" log helpers at the given indent level.
 328 void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
 329   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
 330 }
 331 
 332 void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
 333   LineBuffer(level).append_and_print_cr("[%s: "SIZE_FORMAT"]", str, value);
 334 }
 335 
 336 void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
 337   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
 338 }
 339 
  // Accumulates the pause time accounted to known sub-phases, in ms.
  // Body is partially elided in this listing (original lines 345-360).
 340 double G1GCPhaseTimes::accounted_time_ms() {
 341     // Subtract the root region scanning wait time. It's initialized to
 342     // zero at the start of the pause.
 343     double misc_time_ms = _root_region_scan_wait_time_ms;
 344 


 361 
 362     return misc_time_ms;
 363 }
 364 
  // Thin wrappers over the per-phase WorkerDataArrays; times are stored in
  // seconds and converted to milliseconds here.  In this pre-fix listing the
  // aggregate calls (average/sum/minimum/maximum) take no thread count.
 365 // record the time a phase took in seconds
 366 void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
 367   _gc_par_phases[phase]->set(worker_i, secs);
 368 }
 369 
 370 // add a number of seconds to a phase
 371 void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
 372   _gc_par_phases[phase]->add(worker_i, secs);
 373 }
 374 
 375 void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count) {
 376   _gc_par_phases[phase]->set_thread_work_item(worker_i, count);
 377 }
 378 
 379 // return the average time for a phase in milliseconds
 380 double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
 381   return _gc_par_phases[phase]->average() * 1000.0;
 382 }
 383 
 384 double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
 385   return _gc_par_phases[phase]->get(worker_i) * 1000.0;
 386 }
 387 
 388 double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
 389   return _gc_par_phases[phase]->sum() * 1000.0;
 390 }
 391 
 392 double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
 393   return _gc_par_phases[phase]->minimum() * 1000.0;
 394 }
 395 
 396 double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
 397   return _gc_par_phases[phase]->maximum() * 1000.0;
 398 }
 399 
  // Work-item (count) accessors; each asserts the phase has a sub-count array.
 400 size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
 401   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 402   return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
 403 }
 404 
 405 size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
 406   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 407   return _gc_par_phases[phase]->thread_work_items()->sum();
 408 }
 409 
 410 double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
 411   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 412   return _gc_par_phases[phase]->thread_work_items()->average();
 413 }
 414 
 415 size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
 416   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 417   return _gc_par_phases[phase]->thread_work_items()->minimum();
 418 }
 419 
 420 size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
 421   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 422   return _gc_par_phases[phase]->thread_work_items()->maximum();
 423 }
 424 
  // Pretty-printer for one parallel phase; the class body continues beyond
  // the end of this listing (closing brace not shown).
 425 class G1GCParPhasePrinter : public StackObj {
 426   G1GCPhaseTimes* _phase_times;
 427  public:
 428   G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
 429 
  // Print a phase unless its log level is above the current G1Log level or
  // it is disabled; single-worker arrays use the compact one-value form.
 430   void print(G1GCPhaseTimes::GCParPhases phase_id) {
 431     WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
 432 
 433     if (phase->_log_level > G1Log::level() || !phase->_enabled) {
 434       return;
 435     }
 436 
 437     if (phase->_length == 1) {
 438       print_single_length(phase_id, phase);
 439     } else {
 440       print_multi_length(phase_id, phase);
 441     }
 442   }
 443 
 444  private:
 445 
 446   void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
 447     // No need for min, max, average and sum for only one worker
 448     LineBuffer buf(phase->_indent_level);
 449     buf.append_and_print_cr("[%s:  %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));
 450 
 451     if (phase->_thread_work_items != NULL) {
 452       LineBuffer buf2(phase->_thread_work_items->_indent_level);
 453       buf2.append_and_print_cr("[%s:  "SIZE_FORMAT"]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));
 454     }
 455   }
 456 
  // NOTE(review): this pre-fix version prints all _length slots, not just
  // the workers that were active this pause (cf. the fixed listing below).
 457   void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
 458     for (uint i = 0; i < phase->_length; ++i) {

 459       buf.append("  %.1lf", _phase_times->get_time_ms(phase_id, i));
 460     }
 461     buf.print_cr();
 462   }
 463 
 464   void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
 465     for (uint i = 0; i < thread_work_items->_length; ++i) {

 466       buf.append("  " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
 467     }
 468     buf.print_cr();
 469   }
 470 
  // Per-worker counts at finest log level, then Min/Avg/Max/Diff/Sum summary.
 471   void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
 472     LineBuffer buf(thread_work_items->_indent_level);
 473     buf.append("[%s:", thread_work_items->_title);
 474 
 475     if (G1Log::finest()) {
 476       print_count_values(buf, phase_id, thread_work_items);
 477     }
 478 
 479     assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));
 480 
 481     buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
 482         _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
 483         _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
 484   }
 485 




 137   }
 138 
  // --- Post-fix (JDK-8149347) listing of the same file begins here. ---

  // Store a per-thread work-item count; requires the sub-count array.
 139   void set_thread_work_item(uint worker_i, size_t value) {
 140     assert(_thread_work_items != NULL, "No sub count");
 141     _thread_work_items->set(worker_i, value);
 142   }
 143 
  // Return the value recorded for `worker_i`; asserts the slot was set.
 144   T get(uint worker_i) {
 145     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 146     assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data added for worker %d", worker_i));
 147     return _data[worker_i];
 148   }
 149 
  // Accumulate into an already-set slot and mark cached totals as stale.
 150   void add(uint worker_i, T value) {
 151     assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 152     assert(_data[worker_i] != WorkerDataArray<T>::uninitialized(), err_msg("No data to add to for worker %d", worker_i));
 153     _data[worker_i] += value;
 154     _has_new_data = true;
 155   }
 156 
  // Aggregate getters now take the number of threads that were actually
  // active, so only initialized slots are folded in (the 8149347 fix).
 157   double average(uint active_threads){
 158     calculate_totals(active_threads);
 159     return _average;
 160   }
 161 
 162   T sum(uint active_threads) {
 163     calculate_totals(active_threads);
 164     return _sum;
 165   }
 166 
 167   T minimum(uint active_threads) {
 168     calculate_totals(active_threads);
 169     return _min;
 170   }
 171 
 172   T maximum(uint active_threads) {
 173     calculate_totals(active_threads);
 174     return _max;
 175   }
 176 
  // No-ops in PRODUCT builds; verify() also now takes the active count.
 177   void reset() PRODUCT_RETURN;
 178   void verify(uint active_threads) PRODUCT_RETURN;
 179 
  // Disabled arrays are skipped by verify() and by the phase printer.
 180   void set_enabled(bool enabled) { _enabled = enabled; }
 181 
  // G1Log level at which this phase is printed.
 182   int log_level() { return _log_level;  }
 184  private:
 185 
  // Recompute _sum/_min/_max/_average over the first `active_threads` slots
  // only; results are cached until set()/add() raises _has_new_data again.
 186   void calculate_totals(uint active_threads){
 187     if (!_has_new_data) {
 188       return;
 189     }
 190 
 191     _sum = (T)0;
 192     _min = _data[0];
 193     _max = _min;
 194     assert(active_threads <= _length, "Wrong number of active threads");
 195     for (uint i = 0; i < active_threads; ++i) {
 196       T val = _data[i];
 197       _sum += val;
 198       _min = MIN2(_min, val);
 199       _max = MAX2(_max, val);
 200     }
  // Average over the active-thread count, not the array capacity.
 201     _average = (double)_sum / (double)active_threads;
 202     _has_new_data = false;
 203   }
 204 };
 205 
 206 
 207 #ifndef PRODUCT
 208 
  // Sentinel value marking a slot that no worker has written yet.
 209 template <>
 210 size_t WorkerDataArray<size_t>::uninitialized() {
 211   return (size_t)-1;
 212 }
 213 
 214 template <>
 215 double WorkerDataArray<double>::uninitialized() {
 216   return -1.0;
 217 }
 218 
  // Reset every slot (and any thread-work-item sub-array) to the sentinel.
 219 template <class T>
 220 void WorkerDataArray<T>::reset() {
 221   for (uint i = 0; i < _length; i++) {
 222     _data[i] = WorkerDataArray<T>::uninitialized();
 223   }
 224   if (_thread_work_items != NULL) {
 225     _thread_work_items->reset();
 226   }
 227 }
 228 
  // Debug-only check, now restricted to the slots of threads that were
  // actually active this pause, so dynamic thread counts no longer trip it.
 229 template <class T>
 230 void WorkerDataArray<T>::verify(uint active_threads) {
 231   if (!_enabled) {
 232     return;
 233   }
 234 
 235   assert(active_threads <= _length, "Wrong number of active threads");
 236   for (uint i = 0; i < active_threads; i++) {
 237     assert(_data[i] != WorkerDataArray<T>::uninitialized(),
 238         err_msg("Invalid data for worker %u in '%s'", i, _title));
 239   }
 240   if (_thread_work_items != NULL) {
 241     _thread_work_items->verify(active_threads);
 242   }
 243 }
 244 
 245 #endif
 246 
  // Allocate one WorkerDataArray per parallel phase, sized for the maximum
  // number of GC threads (the per-pause active count can be smaller).
 247 G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
 248   _max_gc_threads(max_gc_threads)
 249 {
 250   assert(max_gc_threads > 0, "Must have some GC threads");
 251 
 252   _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms)", false, G1Log::LevelFiner, 2);
 253   _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms)", true, G1Log::LevelFiner, 2);
 254 
 255   // Root scanning phases
 256   _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms)", true, G1Log::LevelFinest, 3);
 257   _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms)", true, G1Log::LevelFinest, 3);
 258   _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms)", true, G1Log::LevelFinest, 3);
 259   _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms)", true, G1Log::LevelFinest, 3);
 260   _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms)", true, G1Log::LevelFinest, 3);
 261   _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms)", true, G1Log::LevelFinest, 3);


  // (Webrev listing elides original lines 262-305: the remaining phase arrays.)
 306 }
 307 
  // Derive per-worker totals at the end of a GC pause:
  // GCWorkerTotal = end - start, and Other = total minus the sum of the
  // individually timed phases; then verify only the active workers' slots.
 308 void G1GCPhaseTimes::note_gc_end() {
 309   for (uint i = 0; i < _active_gc_threads; i++) {
 310     double worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
 311     record_time_secs(GCWorkerTotal, i , worker_time);
 312 
 313     double worker_known_time =
 314         _gc_par_phases[ExtRootScan]->get(i) +
 315         _gc_par_phases[SATBFiltering]->get(i) +
 316         _gc_par_phases[UpdateRS]->get(i) +
 317         _gc_par_phases[ScanRS]->get(i) +
 318         _gc_par_phases[CodeRoots]->get(i) +
 319         _gc_par_phases[ObjCopy]->get(i) +
 320         _gc_par_phases[Termination]->get(i);
 321 
 322     record_time_secs(Other, i, worker_time - worker_known_time);
 323   }
 324 
  // The fix: verify() is passed _active_gc_threads so it only checks slots
  // that were actually recorded this pause.
 325   for (int i = 0; i < GCParPhasesSentinel; i++) {
 326     _gc_par_phases[i]->verify(_active_gc_threads);
 327   }
 328 }
 329 
  // Single-line "[<str>: <value>]" log helpers at the given indent level.
 330 void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
 331   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
 332 }
 333 
 334 void G1GCPhaseTimes::print_stats(int level, const char* str, size_t value) {
 335   LineBuffer(level).append_and_print_cr("[%s: "SIZE_FORMAT"]", str, value);
 336 }
 337 
 338 void G1GCPhaseTimes::print_stats(int level, const char* str, double value, uint workers) {
 339   LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %u]", str, value, workers);
 340 }
 341 
  // Accumulates the pause time accounted to known sub-phases, in ms.
  // Body is partially elided in this listing (original lines 346-362).
 342 double G1GCPhaseTimes::accounted_time_ms() {
 343     // Subtract the root region scanning wait time. It's initialized to
 344     // zero at the start of the pause.
 345     double misc_time_ms = _root_region_scan_wait_time_ms;
 346 


 363 
 364     return misc_time_ms;
 365 }
 366 
  // Thin wrappers over the per-phase WorkerDataArrays; times are stored in
  // seconds and converted to milliseconds here.  Aggregate calls now pass
  // _active_gc_threads so only active workers' slots are aggregated.
 367 // record the time a phase took in seconds
 368 void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
 369   _gc_par_phases[phase]->set(worker_i, secs);
 370 }
 371 
 372 // add a number of seconds to a phase
 373 void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
 374   _gc_par_phases[phase]->add(worker_i, secs);
 375 }
 376 
 377 void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count) {
 378   _gc_par_phases[phase]->set_thread_work_item(worker_i, count);
 379 }
 380 
 381 // return the average time for a phase in milliseconds
 382 double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
 383   return _gc_par_phases[phase]->average(_active_gc_threads) * 1000.0;
 384 }
 385 
 386 double G1GCPhaseTimes::get_time_ms(GCParPhases phase, uint worker_i) {
 387   return _gc_par_phases[phase]->get(worker_i) * 1000.0;
 388 }
 389 
 390 double G1GCPhaseTimes::sum_time_ms(GCParPhases phase) {
 391   return _gc_par_phases[phase]->sum(_active_gc_threads) * 1000.0;
 392 }
 393 
 394 double G1GCPhaseTimes::min_time_ms(GCParPhases phase) {
 395   return _gc_par_phases[phase]->minimum(_active_gc_threads) * 1000.0;
 396 }
 397 
 398 double G1GCPhaseTimes::max_time_ms(GCParPhases phase) {
 399   return _gc_par_phases[phase]->maximum(_active_gc_threads) * 1000.0;
 400 }
 401 
  // Work-item (count) accessors; each asserts the phase has a sub-count array.
 402 size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i) {
 403   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 404   return _gc_par_phases[phase]->thread_work_items()->get(worker_i);
 405 }
 406 
 407 size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase) {
 408   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 409   return _gc_par_phases[phase]->thread_work_items()->sum(_active_gc_threads);
 410 }
 411 
 412 double G1GCPhaseTimes::average_thread_work_items(GCParPhases phase) {
 413   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 414   return _gc_par_phases[phase]->thread_work_items()->average(_active_gc_threads);
 415 }
 416 
 417 size_t G1GCPhaseTimes::min_thread_work_items(GCParPhases phase) {
 418   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 419   return _gc_par_phases[phase]->thread_work_items()->minimum(_active_gc_threads);
 420 }
 421 
 422 size_t G1GCPhaseTimes::max_thread_work_items(GCParPhases phase) {
 423   assert(_gc_par_phases[phase]->thread_work_items() != NULL, "No sub count");
 424   return _gc_par_phases[phase]->thread_work_items()->maximum(_active_gc_threads);
 425 }
 426 
  // Pretty-printer for one parallel phase; the class body continues beyond
  // the end of this listing (closing brace not shown).
 427 class G1GCParPhasePrinter : public StackObj {
 428   G1GCPhaseTimes* _phase_times;
 429  public:
 430   G1GCParPhasePrinter(G1GCPhaseTimes* phase_times) : _phase_times(phase_times) {}
 431 
  // Print a phase unless its log level is above the current G1Log level or
  // it is disabled; single-worker arrays use the compact one-value form.
 432   void print(G1GCPhaseTimes::GCParPhases phase_id) {
 433     WorkerDataArray<double>* phase = _phase_times->_gc_par_phases[phase_id];
 434 
 435     if (phase->_log_level > G1Log::level() || !phase->_enabled) {
 436       return;
 437     }
 438 
 439     if (phase->_length == 1) {
 440       print_single_length(phase_id, phase);
 441     } else {
 442       print_multi_length(phase_id, phase);
 443     }
 444   }
 445 
 446  private:
 447 
 448   void print_single_length(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
 449     // No need for min, max, average and sum for only one worker
 450     LineBuffer buf(phase->_indent_level);
 451     buf.append_and_print_cr("[%s:  %.1lf]", phase->_title, _phase_times->get_time_ms(phase_id, 0));
 452 
 453     if (phase->_thread_work_items != NULL) {
 454       LineBuffer buf2(phase->_thread_work_items->_indent_level);
 455       buf2.append_and_print_cr("[%s:  "SIZE_FORMAT"]", phase->_thread_work_items->_title, _phase_times->sum_thread_work_items(phase_id));
 456     }
 457   }
 458 
  // The fix: iterate only over the workers active this pause, not the
  // full array capacity.
 459   void print_time_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<double>* phase) {
 460     uint active_length = _phase_times->_active_gc_threads;
 461     for (uint i = 0; i < active_length; ++i) {
 462       buf.append("  %.1lf", _phase_times->get_time_ms(phase_id, i));
 463     }
 464     buf.print_cr();
 465   }
 466 
 467   void print_count_values(LineBuffer& buf, G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
 468     uint active_length = _phase_times->_active_gc_threads;
 469     for (uint i = 0; i < active_length; ++i) {
 470       buf.append("  " SIZE_FORMAT, _phase_times->get_thread_work_item(phase_id, i));
 471     }
 472     buf.print_cr();
 473   }
 474 
  // Per-worker counts at finest log level, then Min/Avg/Max/Diff/Sum summary.
 475   void print_thread_work_items(G1GCPhaseTimes::GCParPhases phase_id, WorkerDataArray<size_t>* thread_work_items) {
 476     LineBuffer buf(thread_work_items->_indent_level);
 477     buf.append("[%s:", thread_work_items->_title);
 478 
 479     if (G1Log::finest()) {
 480       print_count_values(buf, phase_id, thread_work_items);
 481     }
 482 
 483     assert(thread_work_items->_print_sum, err_msg("%s does not have print sum true even though it is a count", thread_work_items->_title));
 484 
 485     buf.append_and_print_cr(" Min: " SIZE_FORMAT ", Avg: %.1lf, Max: " SIZE_FORMAT ", Diff: " SIZE_FORMAT ", Sum: " SIZE_FORMAT "]",
 486         _phase_times->min_thread_work_items(phase_id), _phase_times->average_thread_work_items(phase_id), _phase_times->max_thread_work_items(phase_id),
 487         _phase_times->max_thread_work_items(phase_id) - _phase_times->min_thread_work_items(phase_id), _phase_times->sum_thread_work_items(phase_id));
 488   }


< prev index next >