rev 57242 : imported patch 8235341-workerdataarray-cleanup
rev 57243 : [mq]: 8235341-stefanj-review

   1 /*
   2  * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/timer.hpp"
#include "utilities/macros.hpp"
  39 
// Indentation prefixes used when printing the phase log hierarchy; the index
// is the nesting depth (0-4), two spaces per level.
static const char* Indents[5] = {"", "  ", "    ", "      ", "        "};
  41 
// Sets up the per-pause bookkeeping: one WorkerDataArray<double> per parallel
// phase, sized for max_gc_threads workers, plus WorkerDataArray<size_t>
// counters ("thread work items") linked to the phases that report them.
// Phases of disabled subsystems (hot card cache, string deduplication) keep
// NULL slots and must be NULL-checked by all users of _gc_par_phases.
G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
  _max_gc_threads(max_gc_threads),
  _gc_start_counter(0),
  _gc_pause_time_ms(0.0),
  _ref_phase_times(gc_timer, max_gc_threads),
  _weak_phase_times(max_gc_threads)
{
  assert(max_gc_threads > 0, "Must have some GC threads");

  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms):");
  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms):");

  // Root scanning phases
  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
  AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");)
  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");

  _gc_par_phases[MergeER] = new WorkerDataArray<double>(max_gc_threads, "Eager Reclaim (ms):");

  // Remembered set merging, with per-worker counters for the merged entries.
  _gc_par_phases[MergeRS] = new WorkerDataArray<double>(max_gc_threads, "Remembered Sets (ms):");
  _merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_sparse, MergeRSMergedSparse);
  _merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_fine, MergeRSMergedFine);
  _merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_merged_coarse, MergeRSMergedCoarse);
  _merge_rs_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
  _gc_par_phases[MergeRS]->link_thread_work_items(_merge_rs_dirty_cards, MergeRSDirtyCards);

  // Same structure for the optional collection set; the work-item indices are
  // shared with MergeRS.
  _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Remembered Sets (ms):");
  _opt_merge_rs_merged_sparse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Sparse:");
  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_sparse, MergeRSMergedSparse);
  _opt_merge_rs_merged_fine = new WorkerDataArray<size_t>(max_gc_threads, "Merged Fine:");
  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_fine, MergeRSMergedFine);
  _opt_merge_rs_merged_coarse = new WorkerDataArray<size_t>(max_gc_threads, "Merged Coarse:");
  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_merged_coarse, MergeRSMergedCoarse);
  _opt_merge_rs_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
  _gc_par_phases[OptMergeRS]->link_thread_work_items(_opt_merge_rs_dirty_cards, MergeRSDirtyCards);

  _gc_par_phases[MergeLB] = new WorkerDataArray<double>(max_gc_threads, "Log Buffers (ms):");
  // The hot card cache is optional; without it neither the phase slot nor its
  // work items are allocated.
  if (G1HotCardCache::default_use_cache()) {
    _gc_par_phases[MergeHCC] = new WorkerDataArray<double>(max_gc_threads, "Hot Card Cache (ms):");
    _merge_hcc_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
    _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_dirty_cards, MergeHCCDirtyCards);
    _merge_hcc_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
    _gc_par_phases[MergeHCC]->link_thread_work_items(_merge_hcc_skipped_cards, MergeHCCSkippedCards);
  } else {
    _gc_par_phases[MergeHCC] = NULL;
    _merge_hcc_dirty_cards = NULL;
    _merge_hcc_skipped_cards = NULL;
  }
  _gc_par_phases[ScanHR] = new WorkerDataArray<double>(max_gc_threads, "Scan Heap Roots (ms):");
  _gc_par_phases[OptScanHR] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan Heap Roots (ms):");
  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scan (ms):");
  _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Optional Code Root Scan (ms):");
  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
  _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
  _gc_par_phases[OptTermination] = new WorkerDataArray<double>(max_gc_threads, "Optional Termination (ms):");
  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");

  _scan_hr_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
  _gc_par_phases[ScanHR]->link_thread_work_items(_scan_hr_scanned_cards, ScanHRScannedCards);
  _scan_hr_scanned_blocks = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Blocks:");
  _gc_par_phases[ScanHR]->link_thread_work_items(_scan_hr_scanned_blocks, ScanHRScannedBlocks);
  _scan_hr_claimed_chunks = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Chunks:");
  _gc_par_phases[ScanHR]->link_thread_work_items(_scan_hr_claimed_chunks, ScanHRClaimedChunks);

  _opt_scan_hr_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_scanned_cards, ScanHRScannedCards);
  _opt_scan_hr_scanned_blocks = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Blocks:");
  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_scanned_blocks, ScanHRScannedBlocks);
  _opt_scan_hr_claimed_chunks = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Chunks:");
  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_claimed_chunks, ScanHRClaimedChunks);
  _opt_scan_hr_scanned_opt_refs = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Refs:");
  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_scanned_opt_refs, ScanHRScannedOptRefs);
  _opt_scan_hr_used_memory = new WorkerDataArray<size_t>(max_gc_threads, "Used Memory:");
  _gc_par_phases[OptScanHR]->link_thread_work_items(_opt_scan_hr_used_memory, ScanHRUsedMemory);

  _merge_lb_dirty_cards = new WorkerDataArray<size_t>(max_gc_threads, "Dirty Cards:");
  _gc_par_phases[MergeLB]->link_thread_work_items(_merge_lb_dirty_cards, MergeLBDirtyCards);
  _merge_lb_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
  _gc_par_phases[MergeLB]->link_thread_work_items(_merge_lb_skipped_cards, MergeLBSkippedCards);

  // MergePSS is executed serially (a single time slot), but its work items
  // still aggregate values reported per worker.
  _gc_par_phases[MergePSS] = new WorkerDataArray<double>(1, "Merge Per-Thread State", true /* is_serial */);

  _merge_pss_copied_bytes = new WorkerDataArray<size_t>(max_gc_threads, "Copied Bytes");
  _gc_par_phases[MergePSS]->link_thread_work_items(_merge_pss_copied_bytes, MergePSSCopiedBytes);
  _merge_pss_lab_waste_bytes = new WorkerDataArray<size_t>(max_gc_threads, "LAB Waste");
  _gc_par_phases[MergePSS]->link_thread_work_items(_merge_pss_lab_waste_bytes, MergePSSLABWasteBytes);
  _merge_pss_lab_undo_waste_bytes = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
  _gc_par_phases[MergePSS]->link_thread_work_items(_merge_pss_lab_undo_waste_bytes, MergePSSLABUndoWasteBytes);

  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
  _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);

  _opt_termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Optional Termination Attempts:");
  _gc_par_phases[OptTermination]->link_thread_work_items(_opt_termination_attempts);

  // String deduplication phases only exist when the feature is enabled.
  if (UseStringDeduplication) {
    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
  } else {
    _gc_par_phases[StringDedupQueueFixup] = NULL;
    _gc_par_phases[StringDedupTableFixup] = NULL;
  }

  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty (ms):");
  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:");
  _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);

  _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Young Free Collection Set (ms):");
  _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Non-Young Free Collection Set (ms):");

  // Put all sequential measurements and worker slots into their
  // "uninitialized" state for the first pause.
  reset();
}
 167 
// Clears all measurements in preparation for the next pause: the sequential
// (single-valued) timings recorded by the VM thread, every per-worker phase
// array, and the reference-/weak-processing sub-times.
void G1GCPhaseTimes::reset() {
  _cur_collection_initial_evac_time_ms = 0.0;
  _cur_optional_evac_time_ms = 0.0;
  _cur_collection_code_root_fixup_time_ms = 0.0;
  _cur_strong_code_root_purge_time_ms = 0.0;
  _cur_merge_heap_roots_time_ms = 0.0;
  _cur_optional_merge_heap_roots_time_ms = 0.0;
  _cur_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_optional_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_evac_fail_recalc_used = 0.0;
  _cur_evac_fail_remove_self_forwards = 0.0;
  _cur_string_deduplication_time_ms = 0.0;
  _cur_prepare_tlab_time_ms = 0.0;
  _cur_resize_tlab_time_ms = 0.0;
  _cur_derived_pointer_table_update_time_ms = 0.0;
  _cur_clear_ct_time_ms = 0.0;
  _cur_expand_heap_time_ms = 0.0;
  _cur_ref_proc_time_ms = 0.0;
  _cur_collection_start_sec = 0.0;
  _root_region_scan_wait_time_ms = 0.0;
  _external_accounted_time_ms = 0.0;
  _recorded_prepare_heap_roots_time_ms = 0.0;
  _recorded_clear_claimed_marks_time_ms = 0.0;
  _recorded_young_cset_choice_time_ms = 0.0;
  _recorded_non_young_cset_choice_time_ms = 0.0;
  _recorded_redirty_logged_cards_time_ms = 0.0;
  _recorded_preserve_cm_referents_time_ms = 0.0;
  _recorded_start_new_cset_time_ms = 0.0;
  _recorded_total_free_cset_time_ms = 0.0;
  _recorded_serial_free_cset_time_ms = 0.0;
  _cur_fast_reclaim_humongous_time_ms = 0.0;
  _cur_region_register_time = 0.0;
  _cur_fast_reclaim_humongous_total = 0;
  _cur_fast_reclaim_humongous_candidates = 0;
  _cur_fast_reclaim_humongous_reclaimed = 0;
  _cur_verify_before_time_ms = 0.0;
  _cur_verify_after_time_ms = 0.0;

  // Slots for disabled features (MergeHCC, string dedup) are NULL.
  for (int i = 0; i < GCParPhasesSentinel; i++) {
    if (_gc_par_phases[i] != NULL) {
      _gc_par_phases[i]->reset();
    }
  }

  _ref_phase_times.reset();
  _weak_phase_times.reset();
}
 215 
// Captures the pause start timestamp and clears all data from the previous
// pause; called at the beginning of every GC.
void G1GCPhaseTimes::note_gc_start() {
  _gc_start_counter = os::elapsed_counter();
  reset();
}
 220 
// Asserts that "phase" holds no measurement for the current worker. Used by
// note_gc_end() for workers that never started; deliberately relies on the
// caller's local variables "i" (worker index) and "uninitialized".
#define ASSERT_PHASE_UNINITIALIZED(phase) \
    assert(_gc_par_phases[phase] == NULL || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");

 224 double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) {
 225   if (_gc_par_phases[phase] == NULL) {
 226     return 0.0;
 227   }
 228   double value = _gc_par_phases[phase]->get(worker);
 229   if (value != WorkerDataArray<double>::uninitialized()) {
 230     return value;
 231   }
 232   return 0.0;
 233 }
 234 
// Finalizes per-worker data at the end of the pause: records the total pause
// time, computes each started worker's total and unaccounted ("Other") time,
// and asserts that workers that never started reported nothing.
void G1GCPhaseTimes::note_gc_end() {
  _gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter);

  double uninitialized = WorkerDataArray<double>::uninitialized();

  for (uint i = 0; i < _max_gc_threads; i++) {
    double worker_start = _gc_par_phases[GCWorkerStart]->get(i);
    if (worker_start != uninitialized) {
      assert(_gc_par_phases[GCWorkerEnd]->get(i) != uninitialized, "Worker started but not ended.");
      double total_worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
      record_time_secs(GCWorkerTotal, i , total_worker_time);

      // Anything not attributed to one of the major phases below shows up
      // as "GC Worker Other".
      double worker_known_time = worker_time(ExtRootScan, i) +
                                 worker_time(ScanHR, i) +
                                 worker_time(CodeRoots, i) +
                                 worker_time(ObjCopy, i) +
                                 worker_time(Termination, i);

      record_time_secs(Other, i, total_worker_time - worker_known_time);
    } else {
      // Make sure all slots are uninitialized since this thread did not seem to have been started
      ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd);
      ASSERT_PHASE_UNINITIALIZED(ExtRootScan);
      ASSERT_PHASE_UNINITIALIZED(MergeER);
      ASSERT_PHASE_UNINITIALIZED(MergeRS);
      ASSERT_PHASE_UNINITIALIZED(OptMergeRS);
      ASSERT_PHASE_UNINITIALIZED(MergeHCC);
      ASSERT_PHASE_UNINITIALIZED(MergeLB);
      ASSERT_PHASE_UNINITIALIZED(ScanHR);
      ASSERT_PHASE_UNINITIALIZED(CodeRoots);
      ASSERT_PHASE_UNINITIALIZED(OptCodeRoots);
      ASSERT_PHASE_UNINITIALIZED(ObjCopy);
      ASSERT_PHASE_UNINITIALIZED(OptObjCopy);
      ASSERT_PHASE_UNINITIALIZED(Termination);
    }
  }
}

#undef ASSERT_PHASE_UNINITIALIZED
 274 
// Records (overwrites) the time a phase took on a worker, in seconds.
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->set(worker_id, secs);
}

// Adds a number of seconds to an already recorded phase time.
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->add(worker_id, secs);
}
 284 
 285 void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_id, double secs) {
 286   if (_gc_par_phases[phase]->get(worker_id) == _gc_par_phases[phase]->uninitialized()) {
 287     record_time_secs(phase, worker_id, secs);
 288   } else {
 289     add_time_secs(phase, worker_id, secs);
 290   }
 291 }
 292 
// Returns the raw recorded time for a phase/worker in seconds; may be the
// "uninitialized" marker if nothing was reported.
double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_id) {
  return _gc_par_phases[phase]->get(worker_id);
}

// Records (overwrites) a work item counter linked to the given phase.
void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_thread_work_item(worker_id, count, index);
}

// Records a work item counter, or adds to it if already set.
void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_or_add_thread_work_item(worker_id, count, index);
}

// Returns the value of a work item counter for the given phase/worker.
size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, uint index) {
  return _gc_par_phases[phase]->get_thread_work_item(worker_id, index);
}
 308 
 309 // return the average time for a phase in milliseconds
 310 double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
 311   if (_gc_par_phases[phase] == NULL) {
 312     return 0.0;
 313   }
 314   return _gc_par_phases[phase]->average() * 1000.0;
 315 }
 316 
 317 size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {
 318   if (_gc_par_phases[phase] == NULL) {
 319     return 0;
 320   }
 321   assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count");
 322   return _gc_par_phases[phase]->thread_work_items(index)->sum();
 323 }
 324 
// Prints the per-worker breakdown of a phase or work item array at Trace
// level (gc+phases+task), prefixed with "indent". T is WorkerDataArray<double>
// for phases and WorkerDataArray<size_t> for work items.
template <class T>
void G1GCPhaseTimes::details(T* phase, const char* indent) const {
  LogTarget(Trace, gc, phases, task) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("%s", indent);
    phase->print_details_on(&ls);
  }
}
 334 
 335 void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const {
 336   out->print("%s", Indents[indent]);
 337   phase->print_summary_on(out, print_sum);
 338   details(phase, Indents[indent]);
 339 
 340   for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {
 341     WorkerDataArray<size_t>* work_items = phase->thread_work_items(i);
 342     if (work_items != NULL) {
 343       out->print("%s", Indents[indent + 1]);
 344       work_items->print_summary_on(out, true);
 345       details(work_items, Indents[indent + 1]);
 346     }
 347   }
 348 }
 349 
// Prints a phase at Debug level, at indentation level 2 plus extra_indent.
void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase, uint extra_indent) const {
  LogTarget(Debug, gc, phases) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_phase(phase, 2 + extra_indent, &ls, true);
  }
}

// Prints a phase at Trace level, at indentation level 3.
void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) const {
  LogTarget(Trace, gc, phases) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    log_phase(phase, 3, &ls, print_sum);
  }
}
 366 
// Common format for all time values: one decimal, milliseconds.
#define TIME_FORMAT "%.1lfms"

// Logs "name: <value>ms" at Info level, indentation level 1.
void G1GCPhaseTimes::info_time(const char* name, double value) const {
  log_info(gc, phases)("%s%s: " TIME_FORMAT, Indents[1], name, value);
}

// Logs "name: <value>ms" at Debug level, indentation level 2.
void G1GCPhaseTimes::debug_time(const char* name, double value) const {
  log_debug(gc, phases)("%s%s: " TIME_FORMAT, Indents[2], name, value);
}

// Logs a reference-processing time if either gc+phases or gc+phases+ref
// Debug logging is enabled — but only once when both are.
void G1GCPhaseTimes::debug_time_for_reference(const char* name, double value) const {
  LogTarget(Debug, gc, phases) lt;
  LogTarget(Debug, gc, phases, ref) lt2;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
  } else if (lt2.is_enabled()) {
    LogStream ls(lt2);
    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
  }
}

// Logs "name: <value>ms" at Trace level, indentation level 3.
void G1GCPhaseTimes::trace_time(const char* name, double value) const {
  log_trace(gc, phases)("%s%s: " TIME_FORMAT, Indents[3], name, value);
}

// Logs an integral counter at Trace level, indentation level 3.
void G1GCPhaseTimes::trace_count(const char* name, size_t value) const {
  log_trace(gc, phases)("%s%s: " SIZE_FORMAT, Indents[3], name, value);
}
 397 
// Prints everything that happened before evacuation (root region scan wait,
// collection set choice, region registration, heap root preparation) and
// returns the summed time in ms so print() can compute "Other".
double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
  const double sum_ms = _root_region_scan_wait_time_ms +
                        _recorded_young_cset_choice_time_ms +
                        _recorded_non_young_cset_choice_time_ms +
                        _cur_region_register_time +
                        _recorded_prepare_heap_roots_time_ms +
                        _recorded_clear_claimed_marks_time_ms;

  info_time("Pre Evacuate Collection Set", sum_ms);

  // Only printed when we actually had to wait for root region scanning.
  if (_root_region_scan_wait_time_ms > 0.0) {
    debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  }
  debug_time("Prepare TLABs", _cur_prepare_tlab_time_ms);
  debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
  debug_time("Region Register", _cur_region_register_time);
  if (G1EagerReclaimHumongousObjects) {
    trace_count("Humongous Total", _cur_fast_reclaim_humongous_total);
    trace_count("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
  }

  debug_time("Prepare Heap Roots", _recorded_prepare_heap_roots_time_ms);
  // Only printed for pauses that cleared the claimed marks (initial marks).
  if (_recorded_clear_claimed_marks_time_ms > 0.0) {
    debug_time("Clear Claimed Marks", _recorded_clear_claimed_marks_time_ms);
  }
  return sum_ms;
}
 425 
// Prints the optional collection set phases, but only if any optional
// evacuation work was actually done this pause; returns the summed time in ms.
double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
  const double sum_ms = _cur_optional_evac_time_ms + _cur_optional_merge_heap_roots_time_ms;
  if (sum_ms > 0) {
    info_time("Merge Optional Heap Roots", _cur_optional_merge_heap_roots_time_ms);

    debug_time("Prepare Optional Merge Heap Roots", _cur_optional_prepare_merge_heap_roots_time_ms);
    debug_phase(_gc_par_phases[OptMergeRS]);

    info_time("Evacuate Optional Collection Set", _cur_optional_evac_time_ms);
    debug_phase(_gc_par_phases[OptScanHR]);
    debug_phase(_gc_par_phases[OptObjCopy]);
    debug_phase(_gc_par_phases[OptCodeRoots]);
    debug_phase(_gc_par_phases[OptTermination]);
  }
  return sum_ms;
}
 442 
// Prints the heap root merge and initial collection set evacuation phases
// with their parallel sub-phases; returns the summed time in ms.
double G1GCPhaseTimes::print_evacuate_initial_collection_set() const {
  info_time("Merge Heap Roots", _cur_merge_heap_roots_time_ms);

  debug_time("Prepare Merge Heap Roots", _cur_prepare_merge_heap_roots_time_ms);
  debug_phase(_gc_par_phases[MergeER]);
  debug_phase(_gc_par_phases[MergeRS]);
  // The MergeHCC slot is only allocated when the hot card cache is in use.
  if (G1HotCardCache::default_use_cache()) {
    debug_phase(_gc_par_phases[MergeHCC]);
  }
  debug_phase(_gc_par_phases[MergeLB]);

  info_time("Evacuate Collection Set", _cur_collection_initial_evac_time_ms);

  trace_phase(_gc_par_phases[GCWorkerStart], false);
  debug_phase(_gc_par_phases[ExtRootScan]);
  // The individual root scan sub-phases are only interesting at Trace level.
  for (int i = ExtRootScanSubPhasesFirst; i <= ExtRootScanSubPhasesLast; i++) {
    trace_phase(_gc_par_phases[i]);
  }
  debug_phase(_gc_par_phases[ScanHR]);
  debug_phase(_gc_par_phases[CodeRoots]);
  debug_phase(_gc_par_phases[ObjCopy]);
  debug_phase(_gc_par_phases[Termination]);
  debug_phase(_gc_par_phases[Other]);
  debug_phase(_gc_par_phases[GCWorkerTotal]);
  trace_phase(_gc_par_phases[GCWorkerEnd], false);

  return _cur_collection_initial_evac_time_ms + _cur_merge_heap_roots_time_ms;
}
 471 
// Prints everything that happened after evacuation (fixup, reference
// processing, freeing the collection set, cleanup) and returns the summed
// time in ms so print() can compute "Other".
double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
  const double evac_fail_handling = _cur_evac_fail_recalc_used +
                                    _cur_evac_fail_remove_self_forwards;
  // MergePSS is serial, so worker slot 0 holds its single measurement.
  assert(_gc_par_phases[MergePSS]->get(0) != WorkerDataArray<double>::uninitialized(), "must be set");
  const double merge_pss = _gc_par_phases[MergePSS]->get(0) * MILLIUNITS;
  const double sum_ms = evac_fail_handling +
                        _cur_collection_code_root_fixup_time_ms +
                        _recorded_preserve_cm_referents_time_ms +
                        _cur_ref_proc_time_ms +
                        (_weak_phase_times.total_time_sec() * MILLIUNITS) +
                        _cur_clear_ct_time_ms +
                        merge_pss +
                        _cur_strong_code_root_purge_time_ms +
                        _recorded_redirty_logged_cards_time_ms +
                        _recorded_total_free_cset_time_ms +
                        _cur_fast_reclaim_humongous_time_ms +
                        _cur_expand_heap_time_ms +
                        _cur_string_deduplication_time_ms;

  info_time("Post Evacuate Collection Set", sum_ms);

  debug_time("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);

  debug_time("Clear Card Table", _cur_clear_ct_time_ms);

  debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
  _ref_phase_times.print_all_references(2, false);
  _weak_phase_times.log_print(2);

  if (G1StringDedup::is_enabled()) {
    debug_time("String Deduplication", _cur_string_deduplication_time_ms);
    debug_phase(_gc_par_phases[StringDedupQueueFixup], 1);
    debug_phase(_gc_par_phases[StringDedupTableFixup], 1);
  }

  // Evacuation failure details are only printed when a failure occurred.
  if (G1CollectedHeap::heap()->evacuation_failed()) {
    debug_time("Evacuation Failure", evac_fail_handling);
    trace_time("Recalculate Used", _cur_evac_fail_recalc_used);
    trace_time("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
  }

  debug_phase(_gc_par_phases[MergePSS], 0);
  debug_time("Code Roots Purge", _cur_strong_code_root_purge_time_ms);

  debug_time("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
  trace_phase(_gc_par_phases[RedirtyCards]);
#if COMPILER2_OR_JVMCI
  debug_time("DerivedPointerTable Update", _cur_derived_pointer_table_update_time_ms);
#endif

  debug_time("Free Collection Set", _recorded_total_free_cset_time_ms);
  trace_time("Free Collection Set Serial", _recorded_serial_free_cset_time_ms);
  trace_phase(_gc_par_phases[YoungFreeCSet]);
  trace_phase(_gc_par_phases[NonYoungFreeCSet]);

  if (G1EagerReclaimHumongousObjects) {
    debug_time("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
    trace_count("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
  }
  debug_time("Start New Collection Set", _recorded_start_new_cset_time_ms);
  if (UseTLAB && ResizeTLAB) {
    debug_time("Resize TLABs", _cur_resize_tlab_time_ms);
  }
  debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms);


  return sum_ms;
}
 540 
// Logs the part of the pause not covered by any of the printed phases.
void G1GCPhaseTimes::print_other(double accounted_ms) const {
  info_time("Other", _gc_pause_time_ms - accounted_ms);
}
 544 
// Top-level log output for the pause: finalizes the per-worker data, then
// prints the pre-/evacuation/post sections, accumulating their times so the
// remainder can be reported as "Other".
void G1GCPhaseTimes::print() {
  note_gc_end();

  // Verification times are only printed when verification actually ran.
  if (_cur_verify_before_time_ms > 0.0) {
    debug_time("Verify Before", _cur_verify_before_time_ms);
  }

  double accounted_ms = 0.0;
  accounted_ms += print_pre_evacuate_collection_set();
  accounted_ms += print_evacuate_initial_collection_set();
  accounted_ms += print_evacuate_optional_collection_set();
  accounted_ms += print_post_evacuate_collection_set();
  print_other(accounted_ms);

  if (_cur_verify_after_time_ms > 0.0) {
    debug_time("Verify After", _cur_verify_after_time_ms);
  }
}
 563 
// Maps a GCParPhases value to its (JFR event) name. The array must stay in
// lock-step with the enum declaration, including the AOT_ONLY conditional
// entry; the STATIC_ASSERT below enforces matching lengths.
const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
  static const char* names[] = {
      "GCWorkerStart",
      "ExtRootScan",
      "ThreadRoots",
      "UniverseRoots",
      "JNIRoots",
      "ObjectSynchronizerRoots",
      "ManagementRoots",
      "SystemDictionaryRoots",
      "CLDGRoots",
      "JVMTIRoots",
      AOT_ONLY("AOTCodeRoots" COMMA)
      "CMRefRoots",
      "MergeER",
      "MergeRS",
      "OptMergeRS",
      "MergeLB",
      "MergeHCC",
      "ScanHR",
      "OptScanHR",
      "CodeRoots",
      "OptCodeRoots",
      "ObjCopy",
      "OptObjCopy",
      "Termination",
      "OptTermination",
      "Other",
      "GCWorkerTotal",
      "GCWorkerEnd",
      "StringDedupQueueFixup",
      "StringDedupTableFixup",
      "RedirtyCards",
      "YoungFreeCSet",
      "NonYoungFreeCSet",
      "MergePSS"
      //GCParPhasesSentinel only used to tell end of enum
      };

  STATIC_ASSERT(ARRAY_SIZE(names) == G1GCPhaseTimes::GCParPhasesSentinel); // GCParPhases enum and corresponding string array should have the same "length", this tries to assert it

  return names[phase];
}
 607 
// Scoped tracker that splits the elapsed time of an evacuation phase into the
// phase time proper and the queue-trimming time the G1ParScanThreadState
// accumulates on the side; results are added to the caller-provided Tickspans.
G1EvacPhaseWithTrimTimeTracker::G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time) :
  _pss(pss),
  _start(Ticks::now()),
  _total_time(total_time),
  _trim_time(trim_time),
  _stopped(false) {

  assert(_pss->trim_ticks().value() == 0, "Possibly remaining trim ticks left over from previous use");
}

// Finalizes the measurement unless stop() was already called explicitly.
G1EvacPhaseWithTrimTimeTracker::~G1EvacPhaseWithTrimTimeTracker() {
  if (!_stopped) {
    stop();
  }
}
 623 
// Accumulates elapsed time since construction (minus trimming) into
// _total_time, the trim portion into _trim_time, and clears the PSS's trim
// counter so the next tracker starts from zero. May only be called once.
void G1EvacPhaseWithTrimTimeTracker::stop() {
  assert(!_stopped, "Should only be called once");
  _total_time += (Ticks::now() - _start) - _pss->trim_ticks();
  _trim_time += _pss->trim_ticks();
  _pss->reset_trim_ticks();
  _stopped = true;
}
 631 
// Scoped tracker for one parallel phase on one worker. A NULL phase_times
// disables tracking entirely; "must_record" selects overwrite semantics in
// the destructor instead of record-or-add.
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id, bool must_record) :
  _start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id), _event(), _must_record(must_record) {
  if (_phase_times != NULL) {
    _start_time = Ticks::now();
  }
}
 638 
 639 G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
 640   if (_phase_times != NULL) {
 641     if (_must_record) {
 642       _phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
 643     } else {
 644       _phase_times->record_or_add_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
 645     }
 646     _event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_phase));
 647   }
 648 }
 649 
// Tracker for evacuation phases: like G1GCParPhaseTimesTracker, but uses a
// G1EvacPhaseWithTrimTimeTracker to separate out queue-trimming time, which
// is attributed to ObjCopy instead of this phase.
G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times,
                                                 G1ParScanThreadState* pss,
                                                 G1GCPhaseTimes::GCParPhases phase,
                                                 uint worker_id) :
  G1GCParPhaseTimesTracker(phase_times, phase, worker_id),
  _total_time(),
  _trim_time(),
  _trim_tracker(pss, _total_time, _trim_time) {
}

G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
  if (_phase_times != NULL) {
    // Explicitly stop the trim tracker since it's not yet destructed.
    _trim_tracker.stop();
    // Exclude trim time by increasing the start time. The base class
    // destructor runs after this one and records Ticks::now() - _start_time
    // for the phase, so bumping _start_time here removes the trim portion.
    _start_time += _trim_time;
    _phase_times->record_or_add_time_secs(G1GCPhaseTimes::ObjCopy, _worker_id, _trim_time.seconds());
  }
}
 669 
--- EOF ---