/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "runtime/timer.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

// Indentation prefixes used when printing nested phase information to the log;
// index n is the prefix for nesting level n.
static const char* Indents[5] = {"", "  ", "    ", "      ", "        "};

// Allocates one WorkerDataArray per parallel GC phase, sized for max_gc_threads
// workers, plus per-phase "thread work item" counters where the phase reports
// counts (cards, blocks, bytes, ...) in addition to times.
// Phases that are only active under a runtime flag (hot card cache, string
// deduplication) are left NULL when disabled; all consumers of _gc_par_phases
// must therefore tolerate NULL entries.
G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
  _max_gc_threads(max_gc_threads),
  _gc_start_counter(0),
  _gc_pause_time_ms(0.0),
  _ref_phase_times(gc_timer, max_gc_threads),
  _weak_phase_times(max_gc_threads)
{
  assert(max_gc_threads > 0, "Must have some GC threads");

  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>("GC Worker Start (ms):", max_gc_threads);
  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>("Ext Root Scanning (ms):", max_gc_threads);

  // Root scanning phases
  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("Thread Roots (ms):", max_gc_threads);
  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>("Universe Roots (ms):", max_gc_threads);
  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>("JNI Handles Roots (ms):", max_gc_threads);
  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>("ObjectSynchronizer Roots (ms):", max_gc_threads);
  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>("Management Roots (ms):", max_gc_threads);
  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>("SystemDictionary Roots (ms):", max_gc_threads);
  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDG Roots (ms):", max_gc_threads);
  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>("JVMTI Roots (ms):", max_gc_threads);
  // AOT root scanning only exists in builds with AOT support compiled in.
  AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOT Root Scan (ms):", max_gc_threads);)
  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>("CM RefProcessor Roots (ms):", max_gc_threads);

  _gc_par_phases[MergeER] = new WorkerDataArray<double>("Eager Reclaim (ms):", max_gc_threads);

  _gc_par_phases[MergeRS] = new WorkerDataArray<double>("Remembered Sets (ms):", max_gc_threads);
  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Sparse:", MergeRSMergedSparse);
  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Fine:", MergeRSMergedFine);
  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Coarse:", MergeRSMergedCoarse);
  _gc_par_phases[MergeRS]->create_thread_work_items("Dirty Cards:", MergeRSDirtyCards);

  // The optional-evacuation variant mirrors MergeRS and reuses its work item indices.
  _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>("Optional Remembered Sets (ms):", max_gc_threads);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Sparse:", MergeRSMergedSparse);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Fine:", MergeRSMergedFine);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Coarse:", MergeRSMergedCoarse);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Dirty Cards:", MergeRSDirtyCards);

  _gc_par_phases[MergeLB] = new WorkerDataArray<double>("Log Buffers (ms):", max_gc_threads);
  if (G1HotCardCache::default_use_cache()) {
    _gc_par_phases[MergeHCC] = new WorkerDataArray<double>("Hot Card Cache (ms):", max_gc_threads);
    _gc_par_phases[MergeHCC]->create_thread_work_items("Dirty Cards:", MergeHCCDirtyCards);
    _gc_par_phases[MergeHCC]->create_thread_work_items("Skipped Cards:", MergeHCCSkippedCards);
  } else {
    _gc_par_phases[MergeHCC] = NULL;
  }
  _gc_par_phases[ScanHR] = new WorkerDataArray<double>("Scan Heap Roots (ms):", max_gc_threads);
  _gc_par_phases[OptScanHR] = new WorkerDataArray<double>("Optional Scan Heap Roots (ms):", max_gc_threads);
  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>("Code Root Scan (ms):", max_gc_threads);
  _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>("Optional Code Root Scan (ms):", max_gc_threads);
  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>("Object Copy (ms):", max_gc_threads);
  _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>("Optional Object Copy (ms):", max_gc_threads);
  _gc_par_phases[Termination] = new WorkerDataArray<double>("Termination (ms):", max_gc_threads);
  _gc_par_phases[OptTermination] = new WorkerDataArray<double>("Optional Termination (ms):", max_gc_threads);
  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>("GC Worker Total (ms):", max_gc_threads);
  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>("GC Worker End (ms):", max_gc_threads);
  _gc_par_phases[Other] = new WorkerDataArray<double>("GC Worker Other (ms):", max_gc_threads);

  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
  _gc_par_phases[ScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);

  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Refs:", ScanHRScannedOptRefs);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Used Memory:", ScanHRUsedMemory);

  _gc_par_phases[MergeLB]->create_thread_work_items("Dirty Cards:", MergeLBDirtyCards);
  _gc_par_phases[MergeLB]->create_thread_work_items("Skipped Cards:", MergeLBSkippedCards);

  // MergePSS is a serial phase (length 1), but its work items are recorded
  // per GC worker, hence the explicit max_gc_threads for the items below.
  _gc_par_phases[MergePSS] = new WorkerDataArray<double>("Merge Per-Thread State", 1 /* length */, true /* is_serial */);

  _gc_par_phases[MergePSS]->create_thread_work_items("Copied Bytes", MergePSSCopiedBytes, max_gc_threads);
  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Waste", MergePSSLABWasteBytes, max_gc_threads);
  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Undo Waste", MergePSSLABUndoWasteBytes, max_gc_threads);

  _gc_par_phases[Termination]->create_thread_work_items("Termination Attempts:");

  _gc_par_phases[OptTermination]->create_thread_work_items("Optional Termination Attempts:");

  if (UseStringDeduplication) {
    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>("Queue Fixup (ms):", max_gc_threads);
    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>("Table Fixup (ms):", max_gc_threads);
  } else {
    _gc_par_phases[StringDedupQueueFixup] = NULL;
    _gc_par_phases[StringDedupTableFixup] = NULL;
  }

  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>("Parallel Redirty (ms):", max_gc_threads);
  _gc_par_phases[RedirtyCards]->create_thread_work_items("Redirtied Cards:");

  _gc_par_phases[ParFreeCSet] = new WorkerDataArray<double>("Parallel Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>("Young Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>("Non-Young Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[RebuildFreeList] = new WorkerDataArray<double>("Parallel Rebuild Free List (ms):", max_gc_threads);

  reset();
}

// Clears every sequential (serial) timing/counter field and resets all
// per-worker phase arrays (and the reference/weak processing sub-times)
// so the object is ready to record the next GC pause.
void G1GCPhaseTimes::reset() {
  _cur_collection_initial_evac_time_ms = 0.0;
  _cur_optional_evac_time_ms = 0.0;
  _cur_collection_code_root_fixup_time_ms = 0.0;
  _cur_strong_code_root_purge_time_ms = 0.0;
  _cur_merge_heap_roots_time_ms = 0.0;
  _cur_optional_merge_heap_roots_time_ms = 0.0;
  _cur_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_optional_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_evac_fail_recalc_used = 0.0;
  _cur_evac_fail_remove_self_forwards = 0.0;
  _cur_string_deduplication_time_ms = 0.0;
  _cur_prepare_tlab_time_ms = 0.0;
  _cur_resize_tlab_time_ms = 0.0;
  _cur_derived_pointer_table_update_time_ms = 0.0;
  _cur_clear_ct_time_ms = 0.0;
  _cur_expand_heap_time_ms = 0.0;
  _cur_ref_proc_time_ms = 0.0;
  _cur_collection_start_sec = 0.0;
  _root_region_scan_wait_time_ms = 0.0;
  _external_accounted_time_ms = 0.0;
  _recorded_prepare_heap_roots_time_ms = 0.0;
  _recorded_clear_claimed_marks_time_ms = 0.0;
  _recorded_young_cset_choice_time_ms = 0.0;
  _recorded_non_young_cset_choice_time_ms = 0.0;
  _recorded_redirty_logged_cards_time_ms = 0.0;
  _recorded_preserve_cm_referents_time_ms = 0.0;
  _recorded_start_new_cset_time_ms = 0.0;
  _recorded_total_free_cset_time_ms = 0.0;
  _recorded_serial_free_cset_time_ms = 0.0;
  _recorded_total_rebuild_freelist_time_ms = 0.0;
  _recorded_serial_rebuild_freelist_time_ms = 0.0;
  _cur_fast_reclaim_humongous_time_ms = 0.0;
  _cur_region_register_time = 0.0;
  _cur_fast_reclaim_humongous_total = 0;
  _cur_fast_reclaim_humongous_candidates = 0;
  _cur_fast_reclaim_humongous_reclaimed = 0;
  _cur_verify_before_time_ms = 0.0;
  _cur_verify_after_time_ms = 0.0;

  // Skip NULL entries: phases that are disabled by flags are never allocated.
  for (int i = 0; i < GCParPhasesSentinel; i++) {
    if (_gc_par_phases[i] != NULL) {
      _gc_par_phases[i]->reset();
    }
  }

  _ref_phase_times.reset();
  _weak_phase_times.reset();
}

// Marks the start of a GC pause: snapshot the elapsed counter (used by
// note_gc_end() to compute the pause duration) and clear all recordings.
void G1GCPhaseTimes::note_gc_start() {
  _gc_start_counter = os::elapsed_counter();
  reset();
}

#define ASSERT_PHASE_UNINITIALIZED(phase) \
    assert(_gc_par_phases[phase] == NULL || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");

// Returns the time recorded for 'phase' on 'worker', or 0.0 when the phase is
// not tracked (NULL) or the worker never set its slot (uninitialized sentinel).
double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) {
  if (_gc_par_phases[phase] == NULL) {
    return 0.0;
  }
  double value = _gc_par_phases[phase]->get(worker);
  if (value != WorkerDataArray<double>::uninitialized()) {
    return value;
  }
  return 0.0;
}

// Marks the end of a GC pause: computes the total pause time and, for every
// worker that actually ran (GCWorkerStart was set), derives GCWorkerTotal
// (end - start) and Other (total minus the individually tracked phases).
// Workers that never started must have all phase slots still uninitialized.
void G1GCPhaseTimes::note_gc_end() {
  _gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter);

  double uninitialized = WorkerDataArray<double>::uninitialized();

  for (uint i = 0; i < _max_gc_threads; i++) {
    double worker_start = _gc_par_phases[GCWorkerStart]->get(i);
    if (worker_start != uninitialized) {
      assert(_gc_par_phases[GCWorkerEnd]->get(i) != uninitialized, "Worker started but not ended.");
      double total_worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
      record_time_secs(GCWorkerTotal, i , total_worker_time);

      double worker_known_time = worker_time(ExtRootScan, i) +
                                 worker_time(ScanHR, i) +
                                 worker_time(CodeRoots, i) +
                                 worker_time(ObjCopy, i) +
                                 worker_time(Termination, i);

      record_time_secs(Other, i, total_worker_time - worker_known_time);
    } else {
      // Make sure all slots are uninitialized since this thread did not seem to have been started
      ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd);
      ASSERT_PHASE_UNINITIALIZED(ExtRootScan);
      ASSERT_PHASE_UNINITIALIZED(MergeER);
      ASSERT_PHASE_UNINITIALIZED(MergeRS);
      ASSERT_PHASE_UNINITIALIZED(OptMergeRS);
      ASSERT_PHASE_UNINITIALIZED(MergeHCC);
      ASSERT_PHASE_UNINITIALIZED(MergeLB);
      ASSERT_PHASE_UNINITIALIZED(ScanHR);
      ASSERT_PHASE_UNINITIALIZED(CodeRoots);
      ASSERT_PHASE_UNINITIALIZED(OptCodeRoots);
      ASSERT_PHASE_UNINITIALIZED(ObjCopy);
      ASSERT_PHASE_UNINITIALIZED(OptObjCopy);
      ASSERT_PHASE_UNINITIALIZED(Termination);
    }
  }
}

#undef ASSERT_PHASE_UNINITIALIZED

// record the time a phase took in seconds
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->set(worker_id, secs);
}

// add a number of seconds to a phase
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->add(worker_id, secs);
}

// Record the time if the slot is still uninitialized, otherwise accumulate.
void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_id, double secs) {
  if (_gc_par_phases[phase]->get(worker_id) == _gc_par_phases[phase]->uninitialized()) {
    record_time_secs(phase, worker_id, secs);
  } else {
    add_time_secs(phase, worker_id, secs);
  }
}

double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_id) {
  return _gc_par_phases[phase]->get(worker_id);
}

void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_thread_work_item(worker_id, count, index);
}

void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_or_add_thread_work_item(worker_id, count, index);
}

size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, uint index) {
  return _gc_par_phases[phase]->get_thread_work_item(worker_id, index);
}

// return the average time for a phase in milliseconds
// (0.0 for phases that are disabled and therefore NULL).
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
  if (_gc_par_phases[phase] == NULL) {
    return 0.0;
  }
  return _gc_par_phases[phase]->average() * 1000.0;
}

// Sum a phase's work item counter (selected by 'index') over all workers;
// 0 for phases that are disabled and therefore NULL.
size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {
  if (_gc_par_phases[phase] == NULL) {
    return 0;
  }
  assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items(index)->sum();
}

// Print the per-worker details of a phase (or work item array) at
// trace(gc+phases+task) level, prefixed with 'indent'.
template <class T>
void G1GCPhaseTimes::details(T* phase, const char* indent) const {
  LogTarget(Trace, gc, phases, task) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("%s", indent);
    phase->print_details_on(&ls);
  }
}

// Print a phase summary (and, one level deeper, each of its work item
// arrays) to 'out' at the given indentation level.
void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const {
  out->print("%s", Indents[indent]);
  phase->print_summary_on(out, print_sum);
  details(phase, Indents[indent]);

  for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {
    WorkerDataArray<size_t>* work_items = phase->thread_work_items(i);
    if (work_items != NULL) {
      out->print("%s", Indents[indent + 1]);
      work_items->print_summary_on(out, true);
      details(work_items, Indents[indent + 1]);
    }
  }
}

// log_phase at debug(gc+phases) level, base indentation 2.
void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase, uint extra_indent) const {
  LogTarget(Debug, gc, phases) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_phase(phase, 2 + extra_indent, &ls, true);
  }
}

// log_phase at trace(gc+phases) level, base indentation 3.
void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum, uint extra_indent) const {
  LogTarget(Trace, gc, phases) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    log_phase(phase, 3 + extra_indent, &ls, print_sum);
  }
}

#define TIME_FORMAT "%.1lfms"

void G1GCPhaseTimes::info_time(const char* name, double value) const {
  log_info(gc, phases)("%s%s: " TIME_FORMAT, Indents[1], name, value);
}

void G1GCPhaseTimes::debug_time(const char* name, double value) const {
  log_debug(gc, phases)("%s%s: " TIME_FORMAT, Indents[2], name, value);
}

// Like debug_time(), but also emitted when only gc+phases+ref debug logging
// is enabled (never both, to avoid duplicate lines).
void G1GCPhaseTimes::debug_time_for_reference(const char* name, double value) const {
  LogTarget(Debug, gc, phases) lt;
  LogTarget(Debug, gc, phases, ref) lt2;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
  } else if (lt2.is_enabled()) {
    LogStream ls(lt2);
    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
  }
}

void G1GCPhaseTimes::trace_time(const char* name, double value) const {
  log_trace(gc, phases)("%s%s: " TIME_FORMAT, Indents[3], name, value);
}

void G1GCPhaseTimes::trace_count(const char* name, size_t value) const {
  log_trace(gc, phases)("%s%s: " SIZE_FORMAT, Indents[3], name, value);
}

// Log everything recorded before evacuation started.
// Returns the milliseconds accounted for here so print() can compute "Other".
double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
  const double sum_ms = _root_region_scan_wait_time_ms +
                        _recorded_young_cset_choice_time_ms +
                        _recorded_non_young_cset_choice_time_ms +
                        _cur_region_register_time +
                        _recorded_prepare_heap_roots_time_ms +
                        _recorded_clear_claimed_marks_time_ms;

  info_time("Pre Evacuate Collection Set", sum_ms);

  if (_root_region_scan_wait_time_ms > 0.0) {
    debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  }
  debug_time("Prepare TLABs", _cur_prepare_tlab_time_ms);
  debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
  debug_time("Region Register", _cur_region_register_time);
  if (G1EagerReclaimHumongousObjects) {
    trace_count("Humongous Total", _cur_fast_reclaim_humongous_total);
    trace_count("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
  }

  debug_time("Prepare Heap Roots", _recorded_prepare_heap_roots_time_ms);
  if (_recorded_clear_claimed_marks_time_ms > 0.0) {
    debug_time("Clear Claimed Marks", _recorded_clear_claimed_marks_time_ms);
  }
  return sum_ms;
}

// Log the optional-evacuation phases; skipped entirely when no optional
// evacuation took place (sum is zero). Returns the accounted milliseconds.
double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
  const double sum_ms = _cur_optional_evac_time_ms + _cur_optional_merge_heap_roots_time_ms;
  if (sum_ms > 0) {
    info_time("Merge Optional Heap Roots", _cur_optional_merge_heap_roots_time_ms);

    debug_time("Prepare Optional Merge Heap Roots", _cur_optional_prepare_merge_heap_roots_time_ms);
    debug_phase(_gc_par_phases[OptMergeRS]);

    info_time("Evacuate Optional Collection Set", _cur_optional_evac_time_ms);
    debug_phase(_gc_par_phases[OptScanHR]);
    debug_phase(_gc_par_phases[OptObjCopy]);
    debug_phase(_gc_par_phases[OptCodeRoots]);
    debug_phase(_gc_par_phases[OptTermination]);
  }
  return sum_ms;
}

// Log the merge-heap-roots and initial-evacuation phases.
// Returns the accounted milliseconds.
double G1GCPhaseTimes::print_evacuate_initial_collection_set() const {
  info_time("Merge Heap Roots", _cur_merge_heap_roots_time_ms);

  debug_time("Prepare Merge Heap Roots", _cur_prepare_merge_heap_roots_time_ms);
  debug_phase(_gc_par_phases[MergeER]);
  debug_phase(_gc_par_phases[MergeRS]);
  if (G1HotCardCache::default_use_cache()) {
    debug_phase(_gc_par_phases[MergeHCC]);
  }
  debug_phase(_gc_par_phases[MergeLB]);

  info_time("Evacuate Collection Set", _cur_collection_initial_evac_time_ms);

  trace_phase(_gc_par_phases[GCWorkerStart], false);
  debug_phase(_gc_par_phases[ExtRootScan]);
  for (int i = ExtRootScanSubPhasesFirst; i <= ExtRootScanSubPhasesLast; i++) {
    trace_phase(_gc_par_phases[i]);
  }
  debug_phase(_gc_par_phases[ScanHR]);
  debug_phase(_gc_par_phases[CodeRoots]);
  debug_phase(_gc_par_phases[ObjCopy]);
  debug_phase(_gc_par_phases[Termination]);
  debug_phase(_gc_par_phases[Other]);
  debug_phase(_gc_par_phases[GCWorkerTotal]);
  trace_phase(_gc_par_phases[GCWorkerEnd], false);

  return _cur_collection_initial_evac_time_ms + _cur_merge_heap_roots_time_ms;
}

// Log everything recorded after evacuation finished (reference processing,
// string dedup, card table clearing, freeing the collection set, ...).
// Returns the accounted milliseconds.
double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
  const double evac_fail_handling = _cur_evac_fail_recalc_used +
                                    _cur_evac_fail_remove_self_forwards;
  assert(_gc_par_phases[MergePSS]->get(0) != WorkerDataArray<double>::uninitialized(), "must be set");
  // MergePSS is serial (single slot); convert its seconds to milliseconds.
  const double merge_pss = _gc_par_phases[MergePSS]->get(0) * MILLIUNITS;
  const double sum_ms = evac_fail_handling +
                        _cur_collection_code_root_fixup_time_ms +
                        _recorded_preserve_cm_referents_time_ms +
                        _cur_ref_proc_time_ms +
                        (_weak_phase_times.total_time_sec() * MILLIUNITS) +
                        _cur_clear_ct_time_ms +
                        merge_pss +
                        _cur_strong_code_root_purge_time_ms +
                        _recorded_redirty_logged_cards_time_ms +
                        _recorded_total_free_cset_time_ms +
                        _recorded_total_rebuild_freelist_time_ms +
                        _cur_fast_reclaim_humongous_time_ms +
                        _cur_expand_heap_time_ms +
                        _cur_string_deduplication_time_ms;

  info_time("Post Evacuate Collection Set", sum_ms);

  debug_time("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);

  debug_time("Clear Card Table", _cur_clear_ct_time_ms);

  debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
  _ref_phase_times.print_all_references(2, false);
  _weak_phase_times.log_print(2);

  if (G1StringDedup::is_enabled()) {
    debug_time("String Deduplication", _cur_string_deduplication_time_ms);
    debug_phase(_gc_par_phases[StringDedupQueueFixup], 1);
    debug_phase(_gc_par_phases[StringDedupTableFixup], 1);
  }

  if (G1CollectedHeap::heap()->evacuation_failed()) {
    debug_time("Evacuation Failure", evac_fail_handling);
    trace_time("Recalculate Used", _cur_evac_fail_recalc_used);
    trace_time("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
  }

  debug_phase(_gc_par_phases[MergePSS], 0);
  debug_time("Code Roots Purge", _cur_strong_code_root_purge_time_ms);

  debug_time("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
  trace_phase(_gc_par_phases[RedirtyCards]);
#if COMPILER2_OR_JVMCI
  debug_time("DerivedPointerTable Update", _cur_derived_pointer_table_update_time_ms);
#endif

  debug_time("Free Collection Set", _recorded_total_free_cset_time_ms);
  trace_time("Serial Free Collection Set", _recorded_serial_free_cset_time_ms);
  trace_phase(_gc_par_phases[ParFreeCSet]);
  trace_phase(_gc_par_phases[YoungFreeCSet], true, 1);
  trace_phase(_gc_par_phases[NonYoungFreeCSet], true, 1);

  debug_time("Rebuild Free List", _recorded_total_rebuild_freelist_time_ms);
  // NOTE(review): the label below carries a trailing space in the upstream
  // sources; kept byte-identical since it is runtime log output.
  trace_time("Serial Rebuild Free List ", _recorded_serial_rebuild_freelist_time_ms);
  trace_phase(_gc_par_phases[RebuildFreeList]);

  if (G1EagerReclaimHumongousObjects) {
    debug_time("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
    trace_count("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
  }
  debug_time("Start New Collection Set", _recorded_start_new_cset_time_ms);
  if (UseTLAB && ResizeTLAB) {
    debug_time("Resize TLABs", _cur_resize_tlab_time_ms);
  }
  debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms);


  return sum_ms;
}

// "Other" is whatever part of the pause the individual sections above
// did not account for.
void G1GCPhaseTimes::print_other(double accounted_ms) const {
  info_time("Other", _gc_pause_time_ms - accounted_ms);
}

// Top-level entry point: finalize the pause recording and emit the full
// phase breakdown via unified logging.
void G1GCPhaseTimes::print() {
  note_gc_end();

  if (_cur_verify_before_time_ms > 0.0) {
    debug_time("Verify Before", _cur_verify_before_time_ms);
  }

  double accounted_ms = 0.0;
  accounted_ms += print_pre_evacuate_collection_set();
  accounted_ms += print_evacuate_initial_collection_set();
  accounted_ms += print_evacuate_optional_collection_set();
  accounted_ms += print_post_evacuate_collection_set();
  print_other(accounted_ms);

  if (_cur_verify_after_time_ms > 0.0) {
    debug_time("Verify After", _cur_verify_after_time_ms);
  }
}

// Maps a GCParPhases value to its symbolic name (used for JFR events).
// The array order must match the GCParPhases enum exactly.
const char* G1GCPhaseTimes::phase_name(GCParPhases phase) {
  static const char* names[] = {
      "GCWorkerStart",
      "ExtRootScan",
      "ThreadRoots",
      "UniverseRoots",
      "JNIRoots",
      "ObjectSynchronizerRoots",
      "ManagementRoots",
      "SystemDictionaryRoots",
      "CLDGRoots",
      "JVMTIRoots",
      AOT_ONLY("AOTCodeRoots" COMMA)
      "CMRefRoots",
      "MergeER",
      "MergeRS",
      "OptMergeRS",
      "MergeLB",
      "MergeHCC",
      "ScanHR",
      "OptScanHR",
      "CodeRoots",
      "OptCodeRoots",
      "ObjCopy",
      "OptObjCopy",
      "Termination",
      "OptTermination",
      "Other",
      "GCWorkerTotal",
      "GCWorkerEnd",
      "StringDedupQueueFixup",
      "StringDedupTableFixup",
      "RedirtyCards",
      "ParFreeCSet",
      "YoungFreeCSet",
      "NonYoungFreeCSet",
      "RebuildFreeList",
      "MergePSS"
      //GCParPhasesSentinel only used to tell end of enum
      };

  STATIC_ASSERT(ARRAY_SIZE(names) == G1GCPhaseTimes::GCParPhasesSentinel); // GCParPhases enum and corresponding string array should have the same "length", this tries to assert it

  return names[phase];
}

// RAII helper that splits the wall time of a scope into "total" work time and
// "trim" time (queue trimming done by the G1ParScanThreadState), accumulating
// each into the Tickspans passed by the caller.
G1EvacPhaseWithTrimTimeTracker::G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time) :
  _pss(pss),
  _start(Ticks::now()),
  _total_time(total_time),
  _trim_time(trim_time),
  _stopped(false) {

  assert(_pss->trim_ticks().value() == 0, "Possibly remaining trim ticks left over from previous use");
}

G1EvacPhaseWithTrimTimeTracker::~G1EvacPhaseWithTrimTimeTracker() {
  if (!_stopped) {
    stop();
  }
}

// Finalize the measurement: elapsed time minus trim ticks is work time,
// the trim ticks themselves go to _trim_time. May only be called once.
void G1EvacPhaseWithTrimTimeTracker::stop() {
  assert(!_stopped, "Should only be called once");
  _total_time += (Ticks::now() - _start) - _pss->trim_ticks();
  _trim_time += _pss->trim_ticks();
  _pss->reset_trim_ticks();
  _stopped = true;
}

// RAII tracker that records the elapsed time of 'phase' for 'worker_id' on
// destruction (record vs. record-or-add selected by 'must_record') and emits
// the corresponding JFR event. A NULL phase_times disables all recording.
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id, bool must_record) :
  _start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id), _event(), _must_record(must_record) {
  if (_phase_times != NULL) {
    _start_time = Ticks::now();
  }
}

G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
  if (_phase_times != NULL) {
    if (_must_record) {
      _phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
    } else {
      _phase_times->record_or_add_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
    }
    _event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_phase));
  }
}

// Variant of the phase tracker for evacuation phases: additionally separates
// out queue-trim time via G1EvacPhaseWithTrimTimeTracker.
G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times,
                                                 G1ParScanThreadState* pss,
                                                 G1GCPhaseTimes::GCParPhases phase,
                                                 uint worker_id) :
  G1GCParPhaseTimesTracker(phase_times, phase, worker_id),
  _total_time(),
  _trim_time(),
  _trim_tracker(pss, _total_time, _trim_time) {
}

G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
  if (_phase_times != NULL) {
    // Explicitly stop the trim tracker since it's not yet destructed.
    _trim_tracker.stop();
    // Exclude trim time by increasing the start time.
    _start_time += _trim_time;
    // Trim work is object copying, so account it to the ObjCopy phase.
    _phase_times->record_or_add_time_secs(G1GCPhaseTimes::ObjCopy, _worker_id, _trim_time.seconds());
  }
}