
./42c9c92d320a.export





   7 8074037: Refactor the G1GCPhaseTime logging to make it easier to add new phases
   8 Reviewed-by: tschatzl, mgerdin, ecaspole
   9 
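[Editorial note] The patch replaces the many phase-specific recording methods on G1GCPhaseTimes (record_update_rs_time, record_satb_filtering_time, record_gc_worker_start_time, and so on) with two generic entry points keyed by a GCParPhases enum value, record_time_secs and record_thread_work_item, plus an RAII helper, G1GCParPhaseTimesTracker, that times one worker's work in a phase by scope. The following is a minimal, self-contained sketch of that pattern; PhaseTimes, ParPhaseTracker and everything else in it are simplified stand-ins for illustration, not the HotSpot classes.

// Simplified stand-alone sketch of the phase-times pattern introduced by this change.
// Names and types here are illustrative stand-ins, not the real HotSpot code.
#include <chrono>
#include <cstddef>
#include <vector>

enum GCParPhases { GCWorkerStart, UpdateRS, SATBFiltering, RedirtyCards, GCParPhasesSentinel };

class PhaseTimes {
  std::vector<std::vector<double> > _secs;   // elapsed seconds, indexed [phase][worker]
  std::vector<std::vector<size_t> > _items;  // work items (e.g. processed buffers), same indexing
 public:
  explicit PhaseTimes(unsigned workers)
    : _secs(GCParPhasesSentinel, std::vector<double>(workers, 0.0)),
      _items(GCParPhasesSentinel, std::vector<size_t>(workers, 0)) { }
  void record_time_secs(GCParPhases phase, unsigned worker, double secs)    { _secs[phase][worker] = secs; }
  void record_thread_work_item(GCParPhases phase, unsigned worker, size_t n) { _items[phase][worker] = n; }
};

// RAII scope timer: reports the elapsed time for one phase and one worker when it
// goes out of scope, mirroring the G1GCParPhaseTimesTracker added by this patch.
class ParPhaseTracker {
  PhaseTimes* _pt;
  GCParPhases _phase;
  unsigned    _worker;
  std::chrono::steady_clock::time_point _start;
 public:
  ParPhaseTracker(PhaseTimes* pt, GCParPhases phase, unsigned worker)
    : _pt(pt), _phase(phase), _worker(worker), _start(std::chrono::steady_clock::now()) { }
  ~ParPhaseTracker() {
    if (_pt != NULL) {
      std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - _start;
      _pt->record_time_secs(_phase, _worker, elapsed.count());
    }
  }
};

int main() {
  PhaseTimes phase_times(4);                                 // four hypothetical GC workers
  {
    ParPhaseTracker t(&phase_times, UpdateRS, 0);            // phase timed by this scope
    phase_times.record_thread_work_item(UpdateRS, 0, 42);    // plus a per-worker work count
  }
  return 0;
}
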
  10 diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
  11 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
  12 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
  13 @@ -2206,11 +2206,11 @@
  14    hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
  15  
  16    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  17 -  int n_completed_buffers = 0;
  18 +  size_t n_completed_buffers = 0;
  19    while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  20      n_completed_buffers++;
  21    }
  22 -  g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  23 +  g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
  24    dcqs.clear_n_completed_buffers();
  25    assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  26  }
  27 @@ -3751,9 +3751,9 @@
  28  
  29      TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  30  
  31 -    int active_workers = workers()->active_workers();
  32 +    uint active_workers = workers()->active_workers();

  33      double pause_start_sec = os::elapsedTime();
  34 -    g1_policy()->phase_times()->note_gc_start(active_workers);
  35 +    g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
  36      log_gc_header();
  37  
  38      TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  39 @@ -4486,8 +4486,7 @@
  40    void work(uint worker_id) {
  41      if (worker_id >= _n_workers) return;  // no work needed this round
  42  
  43 -    double start_time_ms = os::elapsedTime() * 1000.0;
  44 -    _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
  45 +    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());
  46  
  47      {
  48        ResourceMark rm;
  49 @@ -4567,10 +4566,11 @@
  50          double start = os::elapsedTime();
  51          G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  52          evac.do_void();


  91    // During conc marking we have to filter the per-thread SATB buffers
  92    // to make sure we remove any oops into the CSet (which will show up
  93    // as implicitly live).
  94 -  double satb_filtering_ms = 0.0;
  95 -  if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
  96 -    if (mark_in_progress()) {
  97 -      double satb_filter_start = os::elapsedTime();
  98 -
  99 +  {
 100 +    G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
 101 +    if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {
 102        JavaThread::satb_mark_queue_set().filter_thread_buffers();
 103 -
 104 -      satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
 105      }
 106    }
 107 -  g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
 108  
 109    // Now scan the complement of the collection set.
 110    G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
 111 @@ -5073,14 +5064,13 @@
 112    G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
 113  
 114    virtual void work(uint worker_id) {
 115 -    double start_time = os::elapsedTime();
 116 +    G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
 117 +    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
 118  
 119      RedirtyLoggedCardTableEntryClosure cl;

 120      _queue->par_apply_closure_to_all_completed_buffers(&cl);



 121  
 122 -    G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
 123 -    timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
 124 -    timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
 125 +    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
 126    }
 127  };
 128  
 129 @@ -5658,12 +5648,14 @@
 130      // reported parallel time.
 131    }
 132  
 133 +  G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
 134 +
 135    double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
 136 -  g1_policy()->phase_times()->record_par_time(par_time_ms);
 137 +  phase_times->record_par_time(par_time_ms);
 138  
 139    double code_root_fixup_time_ms =
 140          (os::elapsedTime() - end_par_time_sec) * 1000.0;


 325 +    _length = length;
 326 +    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
 327    }
 328  
 329 -  T min = _data[0];
 330 -  T max = _data[0];
 331 -  T sum = 0;
 332 -
 333 -  LineBuffer buf(level);
 334 -  buf.append("[%s:", title);
 335 -  for (uint i = 0; i < _length; ++i) {
 336 -    T val = _data[i];
 337 -    min = MIN2(val, min);
 338 -    max = MAX2(val, max);
 339 -    sum += val;
 340 -    if (G1Log::finest()) {
 341 -      buf.append("  ");
 342 -      buf.append(_print_format, val);
 343 -    }
 344 +  ~WorkerDataArray() {
 345 +    FREE_C_HEAP_ARRAY(T, _data);
 346    }
 347  
 348 -  if (G1Log::finest()) {
 349 -    buf.append_and_print_cr("%s", "");
 350 +  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
 351 +    _thread_work_items = thread_work_items;
 352    }
 353  
 354 -  double avg = (double)sum / (double)_length;
 355 -  buf.append(" Min: ");
 356 -  buf.append(_print_format, min);
 357 -  buf.append(", Avg: ");
 358 -  buf.append("%.1lf", avg); // Always print average as a double
 359 -  buf.append(", Max: ");
 360 -  buf.append(_print_format, max);
 361 -  buf.append(", Diff: ");
 362 -  buf.append(_print_format, max - min);
 363 -  if (_print_sum) {
 364 -    // for things like the start and end times the sum is not
 365 -    // that relevant


 816    print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
 817    if (G1StringDedup::is_enabled()) {
 818      print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
 819 -    _cur_string_dedup_queue_fixup_worker_times_ms.print(2, "Queue Fixup (ms)");
 820 -    _cur_string_dedup_table_fixup_worker_times_ms.print(2, "Table Fixup (ms)");
 821 +    for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
 822 +      par_phase_printer.print((GCParPhases) i);
 823 +    }
 824    }
 825    print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
 826    double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
 827 @@ -340,10 +536,7 @@
 828    print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
 829    print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
 830    print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
 831 -  if (G1Log::finest()) {
 832 -    _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
 833 -    _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
 834 -  }
 835 +  par_phase_printer.print(RedirtyCards);
 836    if (G1EagerReclaimHumongousObjects) {
 837      print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
 838      if (G1Log::finest()) {
 839 @@ -366,3 +559,17 @@
 840      print_stats(2, "Verify After", _cur_verify_after_time_ms);
 841    }
 842  }
 843 +
 844 +G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
 845 +    _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
 846 +  if (_phase_times != NULL) {
 847 +    _start_time = os::elapsedTime();
 848 +  }
 849 +}
 850 +
 851 +G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
 852 +  if (_phase_times != NULL) {
 853 +    _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
 854 +  }
 855 +}
 856 +
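
[Editorial note] With the tracker defined above, call sites no longer do the elapsedTime()/record_* bookkeeping by hand; a worker simply opens a scope and the destructor records the elapsed time for that phase. For example, the SATB filtering hunk earlier in this patch becomes:

  {
    G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
    if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {
      JavaThread::satb_mark_queue_set().filter_thread_buffers();
    }
  }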


 872 +class LineBuffer;
 873  
 874 -  NOT_PRODUCT(static const T _uninitialized;)
 875 -
 876 -  // We are caching the sum and average to only have to calculate them once.
 877 -  // This is not done in an MT-safe way. It is intended to allow single
 878 -  // threaded code to call sum() and average() multiple times in any order
 879 -  // without having to worry about the cost.
 880 -  bool   _has_new_data;
 881 -  T      _sum;
 882 -  double _average;
 883 -
 884 - public:
 885 -  WorkerDataArray(uint length, const char* print_format, bool print_sum = true) :
 886 -  _length(length), _print_format(print_format), _print_sum(print_sum), _has_new_data(true) {
 887 -    assert(length > 0, "Must have some workers to store data for");
 888 -    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
 889 -  }
 890 -
 891 -  ~WorkerDataArray() {
 892 -    FREE_C_HEAP_ARRAY(T, _data);
 893 -  }
 894 -
 895 -  void set(uint worker_i, T value) {
 896 -    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 897 -    assert(_data[worker_i] == (T)-1, err_msg("Overwriting data for worker %d", worker_i));
 898 -    _data[worker_i] = value;
 899 -    _has_new_data = true;
 900 -  }
 901 -
 902 -  T get(uint worker_i) {
 903 -    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 904 -    assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
 905 -    return _data[worker_i];
 906 -  }
 907 -
 908 -  void add(uint worker_i, T value) {
 909 -    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 910 -    assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
 911 -    _data[worker_i] += value;
 912 -    _has_new_data = true;


1230    RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
1231  
1232    _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
1233 -
1234 -  _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
1235  }
1236  
1237  void G1RemSet::cleanupHRRS() {
1238 diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
1239 --- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
1240 +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
1241 @@ -106,7 +106,7 @@
1242  
1243  void G1StringDedup::oops_do(OopClosure* keep_alive) {
1244    assert(is_enabled(), "String deduplication not enabled");
1245 -  unlink_or_oops_do(NULL, keep_alive);
1246 +  unlink_or_oops_do(NULL, keep_alive, true /* allow_resize_and_rehash */);
1247  }
1248  
1249  void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
1250 @@ -123,45 +123,39 @@
1251  class G1StringDedupUnlinkOrOopsDoTask : public AbstractGangTask {
1252  private:
1253    G1StringDedupUnlinkOrOopsDoClosure _cl;
1254 +  G1GCPhaseTimes* _phase_times;
1255  
1256  public:
1257    G1StringDedupUnlinkOrOopsDoTask(BoolObjectClosure* is_alive,
1258                                    OopClosure* keep_alive,
1259 -                                  bool allow_resize_and_rehash) :
1260 +                                  bool allow_resize_and_rehash,
1261 +                                  G1GCPhaseTimes* phase_times) :
1262      AbstractGangTask("G1StringDedupUnlinkOrOopsDoTask"),
1263 -    _cl(is_alive, keep_alive, allow_resize_and_rehash) {
1264 -  }
1265 +    _cl(is_alive, keep_alive, allow_resize_and_rehash), _phase_times(phase_times) { }
1266  
1267    virtual void work(uint worker_id) {
1268 -    double queue_fixup_start = os::elapsedTime();
1269 -    G1StringDedupQueue::unlink_or_oops_do(&_cl);
1270 -


1282 +    }
1283 +    {
1284 +      G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
1285 +      G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
1286 +    }
1287    }
1288  };
1289  
1290 -void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, bool allow_resize_and_rehash) {
1291 +void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,
1292 +                                      OopClosure* keep_alive,
1293 +                                      bool allow_resize_and_rehash,
1294 +                                      G1GCPhaseTimes* phase_times) {
1295    assert(is_enabled(), "String deduplication not enabled");
1296 -  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1297 -  g1p->phase_times()->note_string_dedup_fixup_start();
1298 -  double fixup_start = os::elapsedTime();
1299  
1300 -  G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash);
1301 +  G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash, phase_times);

1302    G1CollectedHeap* g1h = G1CollectedHeap::heap();
1303    g1h->set_par_threads();
1304    g1h->workers()->run_task(&task);
1305    g1h->set_par_threads(0);



1306 -
1307 -  double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
1308 -  g1p->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
1309 -  g1p->phase_times()->note_string_dedup_fixup_end();
1310  }
1311  
1312  void G1StringDedup::threads_do(ThreadClosure* tc) {
1313 diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
1314 --- a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
1315 +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
1316 @@ -91,6 +91,7 @@
1317  class ThreadClosure;
1318  class outputStream;
1319  class G1StringDedupTable;
1320 +class G1GCPhaseTimes;
1321  
1322  //
1323  // Main interface for interacting with string deduplication.
1324 @@ -131,7 +132,7 @@
1325    static void oops_do(OopClosure* keep_alive);


   7 8074037: Refactor the G1GCPhaseTime logging to make it easier to add new phases
   8 Reviewed-by: tschatzl, mgerdin, ecaspole
   9 
  10 diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
  11 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
  12 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
  13 @@ -2206,11 +2206,11 @@
  14    hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
  15  
  16    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  17 -  int n_completed_buffers = 0;
  18 +  size_t n_completed_buffers = 0;
  19    while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
  20      n_completed_buffers++;
  21    }
  22 -  g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  23 +  g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
  24    dcqs.clear_n_completed_buffers();
  25    assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  26  }
  27 @@ -3751,10 +3751,10 @@
  28  
  29      TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  30  
  31 -    int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  32 +    uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  33                                  workers()->active_workers() : 1);
  34      double pause_start_sec = os::elapsedTime();
  35 -    g1_policy()->phase_times()->note_gc_start(active_workers);
  36 +    g1_policy()->phase_times()->note_gc_start(active_workers, mark_in_progress());
  37      log_gc_header();
  38  
  39      TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
  40 @@ -4486,8 +4486,7 @@
  41    void work(uint worker_id) {
  42      if (worker_id >= _n_workers) return;  // no work needed this round
  43  
  44 -    double start_time_ms = os::elapsedTime() * 1000.0;
  45 -    _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
  46 +    _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::GCWorkerStart, worker_id, os::elapsedTime());
  47  
  48      {
  49        ResourceMark rm;
  50 @@ -4567,10 +4566,11 @@
  51          double start = os::elapsedTime();
  52          G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
  53          evac.do_void();


  92    // During conc marking we have to filter the per-thread SATB buffers
  93    // to make sure we remove any oops into the CSet (which will show up
  94    // as implicitly live).
  95 -  double satb_filtering_ms = 0.0;
  96 -  if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers)) {
  97 -    if (mark_in_progress()) {
  98 -      double satb_filter_start = os::elapsedTime();
  99 -
 100 +  {
 101 +    G1GCParPhaseTimesTracker x(g1_policy()->phase_times(), G1GCPhaseTimes::SATBFiltering, worker_i);
 102 +    if (!_process_strong_tasks->is_task_claimed(G1H_PS_filter_satb_buffers) && mark_in_progress()) {
 103        JavaThread::satb_mark_queue_set().filter_thread_buffers();
 104 -
 105 -      satb_filtering_ms = (os::elapsedTime() - satb_filter_start) * 1000.0;
 106      }
 107    }
 108 -  g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
 109  
 110    // Now scan the complement of the collection set.
 111    G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
 112 @@ -5073,18 +5064,17 @@
 113    G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue) : AbstractGangTask("Redirty Cards"), _queue(queue) { }
 114  
 115    virtual void work(uint worker_id) {
 116 -    double start_time = os::elapsedTime();
 117 +    G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->g1_policy()->phase_times();
 118 +    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);
 119  
 120      RedirtyLoggedCardTableEntryClosure cl;
 121      if (G1CollectedHeap::heap()->use_parallel_gc_threads()) {
 122        _queue->par_apply_closure_to_all_completed_buffers(&cl);
 123      } else {
 124        _queue->apply_closure_to_all_completed_buffers(&cl);
 125      }
 126  
 127 -    G1GCPhaseTimes* timer = G1CollectedHeap::heap()->g1_policy()->phase_times();
 128 -    timer->record_redirty_logged_cards_time_ms(worker_id, (os::elapsedTime() - start_time) * 1000.0);
 129 -    timer->record_redirty_logged_cards_processed_cards(worker_id, cl.num_processed());
 130 +    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_processed());
 131    }
 132  };
 133  
 134 @@ -5658,12 +5648,14 @@
 135      // reported parallel time.
 136    }
 137  
 138 +  G1GCPhaseTimes* phase_times = g1_policy()->phase_times();
 139 +
 140    double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
 141 -  g1_policy()->phase_times()->record_par_time(par_time_ms);
 142 +  phase_times->record_par_time(par_time_ms);
 143  
 144    double code_root_fixup_time_ms =
 145          (os::elapsedTime() - end_par_time_sec) * 1000.0;


 330 +    _length = length;
 331 +    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
 332    }
 333  
 334 -  T min = _data[0];
 335 -  T max = _data[0];
 336 -  T sum = 0;
 337 -
 338 -  LineBuffer buf(level);
 339 -  buf.append("[%s:", title);
 340 -  for (uint i = 0; i < _length; ++i) {
 341 -    T val = _data[i];
 342 -    min = MIN2(val, min);
 343 -    max = MAX2(val, max);
 344 -    sum += val;
 345 -    if (G1Log::finest()) {
 346 -      buf.append("  ");
 347 -      buf.append(_print_format, val);
 348 -    }
 349 +  ~WorkerDataArray() {
 350 +    FREE_C_HEAP_ARRAY(T, _data, mtGC);
 351    }
 352  
 353 -  if (G1Log::finest()) {
 354 -    buf.append_and_print_cr("%s", "");
 355 +  void link_thread_work_items(WorkerDataArray<size_t>* thread_work_items) {
 356 +    _thread_work_items = thread_work_items;
 357    }
 358  
 359 -  double avg = (double)sum / (double)_length;
 360 -  buf.append(" Min: ");
 361 -  buf.append(_print_format, min);
 362 -  buf.append(", Avg: ");
 363 -  buf.append("%.1lf", avg); // Always print average as a double
 364 -  buf.append(", Max: ");
 365 -  buf.append(_print_format, max);
 366 -  buf.append(", Diff: ");
 367 -  buf.append(_print_format, max - min);
 368 -  if (_print_sum) {
 369 -    // for things like the start and end times the sum is not
 370 -    // that relevant


 821    print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
 822    if (G1StringDedup::is_enabled()) {
 823      print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
 824 -    _cur_string_dedup_queue_fixup_worker_times_ms.print(2, "Queue Fixup (ms)");
 825 -    _cur_string_dedup_table_fixup_worker_times_ms.print(2, "Table Fixup (ms)");
 826 +    for (int i = StringDedupPhasesFirst; i <= StringDedupPhasesLast; i++) {
 827 +      par_phase_printer.print((GCParPhases) i);
 828 +    }
 829    }
 830    print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
 831    double misc_time_ms = pause_time_sec * MILLIUNITS - accounted_time_ms();
 832 @@ -340,10 +536,7 @@
 833    print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
 834    print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
 835    print_stats(2, "Redirty Cards", _recorded_redirty_logged_cards_time_ms);
 836 -  if (G1Log::finest()) {
 837 -    _last_redirty_logged_cards_time_ms.print(3, "Parallel Redirty");
 838 -    _last_redirty_logged_cards_processed_cards.print(3, "Redirtied Cards");
 839 -  }
 840 +  par_phase_printer.print(RedirtyCards);
 841    if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
 842      print_stats(2, "Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
 843      if (G1Log::finest()) {
 844 @@ -366,3 +559,17 @@
 845      print_stats(2, "Verify After", _cur_verify_after_time_ms);
 846    }
 847  }
 848 +
 849 +G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
 850 +    _phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
 851 +  if (_phase_times != NULL) {
 852 +    _start_time = os::elapsedTime();
 853 +  }
 854 +}
 855 +
 856 +G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
 857 +  if (_phase_times != NULL) {
 858 +    _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
 859 +  }
 860 +}
 861 +


 877 +class LineBuffer;
 878  
 879 -  NOT_PRODUCT(static const T _uninitialized;)
 880 -
 881 -  // We are caching the sum and average to only have to calculate them once.
 882 -  // This is not done in an MT-safe way. It is intended to allow single
 883 -  // threaded code to call sum() and average() multiple times in any order
 884 -  // without having to worry about the cost.
 885 -  bool   _has_new_data;
 886 -  T      _sum;
 887 -  double _average;
 888 -
 889 - public:
 890 -  WorkerDataArray(uint length, const char* print_format, bool print_sum = true) :
 891 -  _length(length), _print_format(print_format), _print_sum(print_sum), _has_new_data(true) {
 892 -    assert(length > 0, "Must have some workers to store data for");
 893 -    _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
 894 -  }
 895 -
 896 -  ~WorkerDataArray() {
 897 -    FREE_C_HEAP_ARRAY(T, _data, mtGC);
 898 -  }
 899 -
 900 -  void set(uint worker_i, T value) {
 901 -    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 902 -    assert(_data[worker_i] == (T)-1, err_msg("Overwriting data for worker %d", worker_i));
 903 -    _data[worker_i] = value;
 904 -    _has_new_data = true;
 905 -  }
 906 -
 907 -  T get(uint worker_i) {
 908 -    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 909 -    assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
 910 -    return _data[worker_i];
 911 -  }
 912 -
 913 -  void add(uint worker_i, T value) {
 914 -    assert(worker_i < _length, err_msg("Worker %d is greater than max: %d", worker_i, _length));
 915 -    assert(_data[worker_i] != (T)-1, err_msg("No data to add to for worker %d", worker_i));
 916 -    _data[worker_i] += value;
 917 -    _has_new_data = true;


1235    RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
1236  
1237    _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
1238 -
1239 -  _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
1240  }
1241  
1242  void G1RemSet::cleanupHRRS() {
1243 diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
1244 --- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
1245 +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp
1246 @@ -106,7 +106,7 @@
1247  
1248  void G1StringDedup::oops_do(OopClosure* keep_alive) {
1249    assert(is_enabled(), "String deduplication not enabled");
1250 -  unlink_or_oops_do(NULL, keep_alive);
1251 +  unlink_or_oops_do(NULL, keep_alive, true /* allow_resize_and_rehash */);
1252  }
1253  
1254  void G1StringDedup::unlink(BoolObjectClosure* is_alive) {
1255 @@ -123,49 +123,43 @@
1256  class G1StringDedupUnlinkOrOopsDoTask : public AbstractGangTask {
1257  private:
1258    G1StringDedupUnlinkOrOopsDoClosure _cl;
1259 +  G1GCPhaseTimes* _phase_times;
1260  
1261  public:
1262    G1StringDedupUnlinkOrOopsDoTask(BoolObjectClosure* is_alive,
1263                                    OopClosure* keep_alive,
1264 -                                  bool allow_resize_and_rehash) :
1265 +                                  bool allow_resize_and_rehash,
1266 +                                  G1GCPhaseTimes* phase_times) :
1267      AbstractGangTask("G1StringDedupUnlinkOrOopsDoTask"),
1268 -    _cl(is_alive, keep_alive, allow_resize_and_rehash) {
1269 -  }
1270 +    _cl(is_alive, keep_alive, allow_resize_and_rehash), _phase_times(phase_times) { }
1271 
1272    virtual void work(uint worker_id) {
1273 -    double queue_fixup_start = os::elapsedTime();
1274 -    G1StringDedupQueue::unlink_or_oops_do(&_cl);
1275 -


1287 +    }
1288 +    {
1289 +      G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
1290 +      G1StringDedupTable::unlink_or_oops_do(&_cl, worker_id);
1291 +    }
1292    }
1293  };
1294  
1295 -void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, bool allow_resize_and_rehash) {
1296 +void G1StringDedup::unlink_or_oops_do(BoolObjectClosure* is_alive,
1297 +                                      OopClosure* keep_alive,
1298 +                                      bool allow_resize_and_rehash,
1299 +                                      G1GCPhaseTimes* phase_times) {
1300    assert(is_enabled(), "String deduplication not enabled");
1301 -  G1CollectorPolicy* g1p = G1CollectedHeap::heap()->g1_policy();
1302 -  g1p->phase_times()->note_string_dedup_fixup_start();
1303 -  double fixup_start = os::elapsedTime();
1304  
1305 -  G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash);
1306 +  G1StringDedupUnlinkOrOopsDoTask task(is_alive, keep_alive, allow_resize_and_rehash, phase_times);
1307    if (G1CollectedHeap::use_parallel_gc_threads()) {
1308      G1CollectedHeap* g1h = G1CollectedHeap::heap();
1309      g1h->set_par_threads();
1310      g1h->workers()->run_task(&task);
1311      g1h->set_par_threads(0);
1312    } else {
1313      task.work(0);
1314    }
1315 -
1316 -  double fixup_time_ms = (os::elapsedTime() - fixup_start) * 1000.0;
1317 -  g1p->phase_times()->record_string_dedup_fixup_time(fixup_time_ms);
1318 -  g1p->phase_times()->note_string_dedup_fixup_end();
1319  }
1320  
1321  void G1StringDedup::threads_do(ThreadClosure* tc) {
1322 diff --git a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
1323 --- a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
1324 +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp
1325 @@ -91,6 +91,7 @@
1326  class ThreadClosure;
1327  class outputStream;
1328  class G1StringDedupTable;
1329 +class G1GCPhaseTimes;
1330  
1331  //
1332  // Main interface for interacting with string deduplication.
1333 @@ -131,7 +132,7 @@
1334    static void oops_do(OopClosure* keep_alive);