
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp

rev 7854 : imported patch 8027962-per-phase-timing-measurements-for-strong-roots-processing
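
For context, the hunks below add an optional set of per-phase WorkerDataArray<double> instances to G1GCPhaseTimes, one per external root scanning phase, which are reset at the start of each GC and printed as sub-items under "Ext Root Scanning (ms)". The standalone sketch below models that storage pattern in plain C++ to make the data flow easier to follow; the class name PerPhaseWorkerTimes and its members are illustrative only and do not appear in the patch.

// Simplified, standalone model of the per-phase timing storage this patch
// introduces; class and member names are illustrative, not HotSpot's.
#include <cstdio>
#include <vector>

class PerPhaseWorkerTimes {
  // _times_ms[phase][worker] holds the milliseconds a worker spent in a
  // given external-root-scanning phase during the current GC.
  std::vector<std::vector<double> > _times_ms;

public:
  PerPhaseWorkerTimes(unsigned num_phases, unsigned max_workers)
    : _times_ms(num_phases, std::vector<double>(max_workers, 0.0)) { }

  // Analogous to the reset done in note_gc_start(): clear last GC's values.
  void reset() {
    for (size_t p = 0; p < _times_ms.size(); p++) {
      for (size_t w = 0; w < _times_ms[p].size(); w++) {
        _times_ms[p][w] = 0.0;
      }
    }
  }

  // Called by a GC worker after it finishes one root-scanning phase.
  void record(unsigned phase, unsigned worker, double ms) {
    _times_ms[phase][worker] = ms;
  }

  // Analogous to the print path: one line per phase, averaged over workers.
  void print(const char* const* phase_names) const {
    for (size_t p = 0; p < _times_ms.size(); p++) {
      double sum = 0.0;
      for (size_t w = 0; w < _times_ms[p].size(); w++) {
        sum += _times_ms[p][w];
      }
      printf("      [%s (ms): avg %.1lf]\n",
             phase_names[p], sum / _times_ms[p].size());
    }
  }
};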

@@ -27,10 +27,11 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1GCPhaseTimes.hpp"
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "runtime/atomic.inline.hpp"
+#include "g1CollectedHeap.hpp"
 
 // Helper class for avoiding interleaved logging
 class LineBuffer: public StackObj {
 
 private:

@@ -143,10 +144,11 @@
 template <class T>
 void WorkerDataArray<T>::reset() {
   for (uint i = 0; i < _length; i++) {
     _data[i] = (T)_uninitialized;
   }
+  _has_new_data = true;
 }
 
 template <class T>
 void WorkerDataArray<T>::verify() {
   for (uint i = 0; i < _length; i++) {

@@ -156,14 +158,16 @@
   }
 }
 
 #endif
 
-G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
+G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads, uint num_ext_root_scan_phases) :
   _max_gc_threads(max_gc_threads),
+  _num_ext_root_scan_phases(num_ext_root_scan_phases),
   _last_gc_worker_start_times_ms(_max_gc_threads, "%.1lf", false),
   _last_ext_root_scan_times_ms(_max_gc_threads, "%.1lf"),
+  _last_ext_root_scan_phase_times_ms(NULL),
   _last_satb_filtering_times_ms(_max_gc_threads, "%.1lf"),
   _last_update_rs_times_ms(_max_gc_threads, "%.1lf"),
   _last_update_rs_processed_buffers(_max_gc_threads, "%d"),
   _last_scan_rs_times_ms(_max_gc_threads, "%.1lf"),
   _last_strong_code_root_scan_times_ms(_max_gc_threads, "%.1lf"),

@@ -177,10 +181,25 @@
   _last_redirty_logged_cards_processed_cards(_max_gc_threads, SIZE_FORMAT),
   _cur_string_dedup_queue_fixup_worker_times_ms(_max_gc_threads, "%.1lf"),
   _cur_string_dedup_table_fixup_worker_times_ms(_max_gc_threads, "%.1lf")
 {
   assert(max_gc_threads > 0, "Must have some GC threads");
+  if (track_ext_root_scan_phases()) {
+    _last_ext_root_scan_phase_times_ms = NEW_C_HEAP_ARRAY(WorkerDataArray<double>*, num_ext_root_scan_phases, mtGC);
+    for (uint i = 0; i < num_ext_root_scan_phases; i++) {
+      _last_ext_root_scan_phase_times_ms[i] = new WorkerDataArray<double>(_max_gc_threads, "%.1lf");
+    }
+  }
+}
+
+G1GCPhaseTimes::~G1GCPhaseTimes() {
+  if (track_ext_root_scan_phases()) {
+    for (uint i = 0; i < _num_ext_root_scan_phases; i++) {
+      delete _last_ext_root_scan_phase_times_ms[i];
+    }
+    FREE_C_HEAP_ARRAY(WorkerDataArray<double>*, _last_ext_root_scan_phase_times_ms);
+  }
 }
 
 void G1GCPhaseTimes::note_gc_start(uint active_gc_threads) {
   assert(active_gc_threads > 0, "The number of threads must be > 0");
  assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");

@@ -201,10 +220,13 @@
   _last_gc_worker_other_times_ms.reset();
 
   _last_redirty_logged_cards_time_ms.reset();
   _last_redirty_logged_cards_processed_cards.reset();
 
+  for (uint i = 0; i < _num_ext_root_scan_phases; i++) {
+    _last_ext_root_scan_phase_times_ms[i]->reset();
+  }
 }
 
 void G1GCPhaseTimes::note_gc_end() {
   _last_gc_worker_start_times_ms.verify();
   _last_ext_root_scan_times_ms.verify();

@@ -293,10 +315,16 @@
     print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
   }
   print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
   _last_gc_worker_start_times_ms.print(2, "GC Worker Start (ms)");
   _last_ext_root_scan_times_ms.print(2, "Ext Root Scanning (ms)");
+  if (track_ext_root_scan_phases()) {
+    for (uint i = 0; i < _num_ext_root_scan_phases; i++) {
+      WorkerDataArray<double>* data = _last_ext_root_scan_phase_times_ms[i];
+      data->print(3, G1CollectedHeap::ext_roots_task_string(i));
+    }
+  }
   if (_last_satb_filtering_times_ms.sum() > 0.0) {
     _last_satb_filtering_times_ms.print(2, "SATB Filtering (ms)");
   }
   _last_update_rs_times_ms.print(2, "Update RS (ms)");
     _last_update_rs_processed_buffers.print(3, "Processed Buffers");
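
Reusing the PerPhaseWorkerTimes sketch above, a possible end-to-end call pattern for one GC pause might look like the following; the phase names and timings are made up for illustration and need not match what G1CollectedHeap::ext_roots_task_string() returns.

// Usage sketch for the PerPhaseWorkerTimes model above, mirroring how the
// patch wires the real data through note_gc_start(), the per-worker record
// calls, and the print path.
int main() {
  const char* phase_names[] = { "Thread Roots", "StringTable Roots", "CLDG Roots" };
  PerPhaseWorkerTimes times(3 /* phases */, 4 /* workers */);

  times.reset();                       // done at GC start
  for (unsigned w = 0; w < 4; w++) {   // each worker records into its own slot
    times.record(0, w, 0.3);
    times.record(1, w, 0.1);
    times.record(2, w, 0.2);
  }
  times.print(phase_names);            // printed under "Ext Root Scanning (ms)"
  return 0;
}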