/*
 * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "memory/resourceArea.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "runtime/timer.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"

static const char* indent(uint level) {
  static const char* Indents[] = {"", "  ", "    ", "      ", "        ", "          "};
  assert(level < ARRAY_SIZE(Indents), "Too high indent level %u", level);
  return Indents[level];
}

G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
  _max_gc_threads(max_gc_threads),
  _gc_start_counter(0),
  _gc_pause_time_ms(0.0),
  _ref_phase_times(gc_timer, max_gc_threads),
  _weak_phase_times(max_gc_threads)
{
  assert(max_gc_threads > 0, "Must have some GC threads");

  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>("GCWorkerStart", "GC Worker Start (ms):", max_gc_threads);
  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>("ExtRootScan", "Ext Root Scanning (ms):", max_gc_threads);

  // Root scanning phases
  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("ThreadRoots", "Thread Roots (ms):", max_gc_threads);
  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>("UniverseRoots", "Universe Roots (ms):", max_gc_threads);
  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>("ObjectSynchronizerRoots", "ObjectSynchronizer Roots (ms):", max_gc_threads);
  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>("ManagementRoots", "Management Roots (ms):", max_gc_threads);
  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDGRoots", "CLDG Roots (ms):", max_gc_threads);
  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>("JVMTIRoots", "JVMTI Roots (ms):", max_gc_threads);
  AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOTCodeRoots", "AOT Root Scan (ms):", max_gc_threads);)
  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>("CMRefRoots", "CM RefProcessor Roots (ms):", max_gc_threads);

  int index = G1GCPhaseTimes::StrongOopStorageSetRoots;
  for (OopStorageSet::Iterator it = OopStorageSet::strong_iterator(); !it.is_end(); ++it, ++index) {
    const char* phase_name_postfix = " Roots (ms):";
    char* oop_storage_phase_name = NEW_C_HEAP_ARRAY(char, strlen(phase_name_postfix) + strlen(it->name()) + 1, mtGC);
    strcpy(oop_storage_phase_name, it->name());
    strcat(oop_storage_phase_name, phase_name_postfix);
    _gc_par_phases[index] = new WorkerDataArray<double>(it->name(), oop_storage_phase_name, max_gc_threads);
  }

  _gc_par_phases[MergeER] = new WorkerDataArray<double>("MergeER", "Eager Reclaim (ms):", max_gc_threads);

  _gc_par_phases[MergeRS] = new WorkerDataArray<double>("MergeRS", "Remembered Sets (ms):", max_gc_threads);
  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Sparse:", MergeRSMergedSparse);
  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Fine:", MergeRSMergedFine);
  _gc_par_phases[MergeRS]->create_thread_work_items("Merged Coarse:", MergeRSMergedCoarse);
  _gc_par_phases[MergeRS]->create_thread_work_items("Dirty Cards:", MergeRSDirtyCards);

  _gc_par_phases[OptMergeRS] = new WorkerDataArray<double>("OptMergeRS", "Optional Remembered Sets (ms):", max_gc_threads);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Sparse:", MergeRSMergedSparse);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Fine:", MergeRSMergedFine);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Merged Coarse:", MergeRSMergedCoarse);
  _gc_par_phases[OptMergeRS]->create_thread_work_items("Dirty Cards:", MergeRSDirtyCards);

  _gc_par_phases[MergeLB] = new WorkerDataArray<double>("MergeLB", "Log Buffers (ms):", max_gc_threads);
  if (G1HotCardCache::default_use_cache()) {
    _gc_par_phases[MergeHCC] = new WorkerDataArray<double>("MergeHCC", "Hot Card Cache (ms):", max_gc_threads);
    _gc_par_phases[MergeHCC]->create_thread_work_items("Dirty Cards:", MergeHCCDirtyCards);
    _gc_par_phases[MergeHCC]->create_thread_work_items("Skipped Cards:", MergeHCCSkippedCards);
  } else {
    _gc_par_phases[MergeHCC] = NULL;
  }
  _gc_par_phases[ScanHR] = new WorkerDataArray<double>("ScanHR", "Scan Heap Roots (ms):", max_gc_threads);
  _gc_par_phases[OptScanHR] = new WorkerDataArray<double>("OptScanHR", "Optional Scan Heap Roots (ms):", max_gc_threads);
  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>("CodeRoots", "Code Root Scan (ms):", max_gc_threads);
  _gc_par_phases[OptCodeRoots] = new WorkerDataArray<double>("OptCodeRoots", "Optional Code Root Scan (ms):", max_gc_threads);
  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>("ObjCopy", "Object Copy (ms):", max_gc_threads);
  _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>("OptObjCopy", "Optional Object Copy (ms):", max_gc_threads);
  _gc_par_phases[Termination] = new WorkerDataArray<double>("Termination", "Termination (ms):", max_gc_threads);
  _gc_par_phases[OptTermination] = new WorkerDataArray<double>("OptTermination", "Optional Termination (ms):", max_gc_threads);
  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>("GCWorkerTotal", "GC Worker Total (ms):", max_gc_threads);
  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>("GCWorkerEnd", "GC Worker End (ms):", max_gc_threads);
  _gc_par_phases[Other] = new WorkerDataArray<double>("Other", "GC Worker Other (ms):", max_gc_threads);

  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
  _gc_par_phases[ScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);

  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Claimed Chunks:", ScanHRClaimedChunks);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Scanned Refs:", ScanHRScannedOptRefs);
  _gc_par_phases[OptScanHR]->create_thread_work_items("Used Memory:", ScanHRUsedMemory);

  _gc_par_phases[MergeLB]->create_thread_work_items("Dirty Cards:", MergeLBDirtyCards);
  _gc_par_phases[MergeLB]->create_thread_work_items("Skipped Cards:", MergeLBSkippedCards);

  _gc_par_phases[MergePSS] = new WorkerDataArray<double>("MergePSS", "Merge Per-Thread State", 1 /* length */, true /* is_serial */);

  _gc_par_phases[MergePSS]->create_thread_work_items("Copied Bytes", MergePSSCopiedBytes, max_gc_threads);
  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Waste", MergePSSLABWasteBytes, max_gc_threads);
  _gc_par_phases[MergePSS]->create_thread_work_items("LAB Undo Waste", MergePSSLABUndoWasteBytes, max_gc_threads);

  _gc_par_phases[Termination]->create_thread_work_items("Termination Attempts:");

  _gc_par_phases[OptTermination]->create_thread_work_items("Optional Termination Attempts:");

  if (UseStringDeduplication) {
    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>("StringDedupQueueFixup", "Queue Fixup (ms):", max_gc_threads);
    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>("StringDedupTableFixup", "Table Fixup (ms):", max_gc_threads);
  } else {
    _gc_par_phases[StringDedupQueueFixup] = NULL;
    _gc_par_phases[StringDedupTableFixup] = NULL;
  }

  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>("RedirtyCards", "Parallel Redirty (ms):", max_gc_threads);
  _gc_par_phases[RedirtyCards]->create_thread_work_items("Redirtied Cards:");

  _gc_par_phases[ParFreeCSet] = new WorkerDataArray<double>("ParFreeCSet", "Parallel Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>("YoungFreeCSet", "Young Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>("NonYoungFreeCSet", "Non-Young Free Collection Set (ms):", max_gc_threads);
  _gc_par_phases[RebuildFreeList] = new WorkerDataArray<double>("RebuildFreeList", "Parallel Rebuild Free List (ms):", max_gc_threads);

  reset();
}

void G1GCPhaseTimes::reset() {
  _cur_collection_initial_evac_time_ms = 0.0;
  _cur_optional_evac_time_ms = 0.0;
  _cur_collection_code_root_fixup_time_ms = 0.0;
  _cur_strong_code_root_purge_time_ms = 0.0;
  _cur_merge_heap_roots_time_ms = 0.0;
  _cur_optional_merge_heap_roots_time_ms = 0.0;
  _cur_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_optional_prepare_merge_heap_roots_time_ms = 0.0;
  _cur_evac_fail_recalc_used = 0.0;
  _cur_evac_fail_remove_self_forwards = 0.0;
  _cur_string_deduplication_time_ms = 0.0;
  _cur_prepare_tlab_time_ms = 0.0;
  _cur_resize_tlab_time_ms = 0.0;
  _cur_concatenate_dirty_card_logs_time_ms = 0.0;
  _cur_derived_pointer_table_update_time_ms = 0.0;
  _cur_clear_ct_time_ms = 0.0;
  _cur_expand_heap_time_ms = 0.0;
  _cur_ref_proc_time_ms = 0.0;
  _cur_collection_start_sec = 0.0;
  _root_region_scan_wait_time_ms = 0.0;
  _external_accounted_time_ms = 0.0;
  _recorded_prepare_heap_roots_time_ms = 0.0;
  _recorded_clear_claimed_marks_time_ms = 0.0;
  _recorded_young_cset_choice_time_ms = 0.0;
  _recorded_non_young_cset_choice_time_ms = 0.0;
  _recorded_redirty_logged_cards_time_ms = 0.0;
  _recorded_preserve_cm_referents_time_ms = 0.0;
  _recorded_start_new_cset_time_ms = 0.0;
  _recorded_total_free_cset_time_ms = 0.0;
  _recorded_serial_free_cset_time_ms = 0.0;
  _recorded_total_rebuild_freelist_time_ms = 0.0;
  _recorded_serial_rebuild_freelist_time_ms = 0.0;
  _cur_fast_reclaim_humongous_time_ms = 0.0;
  _cur_region_register_time = 0.0;
  _cur_fast_reclaim_humongous_total = 0;
  _cur_fast_reclaim_humongous_candidates = 0;
  _cur_fast_reclaim_humongous_reclaimed = 0;
  _cur_verify_before_time_ms = 0.0;
  _cur_verify_after_time_ms = 0.0;

  for (int i = 0; i < GCParPhasesSentinel; i++) {
    if (_gc_par_phases[i] != NULL) {
      _gc_par_phases[i]->reset();
    }
  }

  _ref_phase_times.reset();
  _weak_phase_times.reset();
}

void G1GCPhaseTimes::note_gc_start() {
  _gc_start_counter = os::elapsed_counter();
  reset();
}

#define ASSERT_PHASE_UNINITIALIZED(phase) \
    assert(_gc_par_phases[phase] == NULL || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");

double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) {
  if (_gc_par_phases[phase] == NULL) {
    return 0.0;
  }
  double value = _gc_par_phases[phase]->get(worker);
  if (value != WorkerDataArray<double>::uninitialized()) {
    return value;
  }
  return 0.0;
}

void G1GCPhaseTimes::note_gc_end() {
  _gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter);

  double uninitialized = WorkerDataArray<double>::uninitialized();

  for (uint i = 0; i < _max_gc_threads; i++) {
    double worker_start = _gc_par_phases[GCWorkerStart]->get(i);
    if (worker_start != uninitialized) {
      assert(_gc_par_phases[GCWorkerEnd]->get(i) != uninitialized, "Worker started but not ended.");
      double total_worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
      record_time_secs(GCWorkerTotal, i, total_worker_time);

      double worker_known_time = worker_time(ExtRootScan, i) +
                                 worker_time(ScanHR, i) +
                                 worker_time(CodeRoots, i) +
                                 worker_time(ObjCopy, i) +
                                 worker_time(Termination, i);

      record_time_secs(Other, i, total_worker_time - worker_known_time);
    } else {
      // Make sure all slots are uninitialized since this thread did not seem to have been started
      ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd);
      ASSERT_PHASE_UNINITIALIZED(ExtRootScan);
      ASSERT_PHASE_UNINITIALIZED(MergeER);
      ASSERT_PHASE_UNINITIALIZED(MergeRS);
      ASSERT_PHASE_UNINITIALIZED(OptMergeRS);
      ASSERT_PHASE_UNINITIALIZED(MergeHCC);
      ASSERT_PHASE_UNINITIALIZED(MergeLB);
      ASSERT_PHASE_UNINITIALIZED(ScanHR);
      ASSERT_PHASE_UNINITIALIZED(CodeRoots);
      ASSERT_PHASE_UNINITIALIZED(OptCodeRoots);
      ASSERT_PHASE_UNINITIALIZED(ObjCopy);
      ASSERT_PHASE_UNINITIALIZED(OptObjCopy);
      ASSERT_PHASE_UNINITIALIZED(Termination);
    }
  }
}

#undef ASSERT_PHASE_UNINITIALIZED

// record the time a phase took in seconds
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->set(worker_id, secs);
}

// add a number of seconds to a phase
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_id, double secs) {
  _gc_par_phases[phase]->add(worker_id, secs);
}

void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_id, double secs) {
  if (_gc_par_phases[phase]->get(worker_id) == _gc_par_phases[phase]->uninitialized()) {
    record_time_secs(phase, worker_id, secs);
  } else {
    add_time_secs(phase, worker_id, secs);
  }
}

double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_id) {
  return _gc_par_phases[phase]->get(worker_id);
}

void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_thread_work_item(worker_id, count, index);
}

void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
  _gc_par_phases[phase]->set_or_add_thread_work_item(worker_id, count, index);
}

size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, uint index) {
  return _gc_par_phases[phase]->get_thread_work_item(worker_id, index);
}

// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
  if (_gc_par_phases[phase] == NULL) {
    return 0.0;
  }
  return _gc_par_phases[phase]->average() * 1000.0;
}

size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {
  if (_gc_par_phases[phase] == NULL) {
    return 0;
  }
  assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items(index)->sum();
}

template <class T>
void G1GCPhaseTimes::details(T* phase, const char* indent_str) const {
  LogTarget(Trace, gc, phases, task) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("%s", indent_str);
    phase->print_details_on(&ls);
  }
}

void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent_level, outputStream* out, bool print_sum) const {
  out->print("%s", indent(indent_level));
  phase->print_summary_on(out, print_sum);
  details(phase, indent(indent_level));

  for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {
    WorkerDataArray<size_t>* work_items = phase->thread_work_items(i);
    if (work_items != NULL) {
      out->print("%s", indent(indent_level + 1));
      work_items->print_summary_on(out, true);
      details(work_items, indent(indent_level + 1));
    }
  }
}

void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase, uint extra_indent) const {
  LogTarget(Debug, gc, phases) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_phase(phase, 2 + extra_indent, &ls, true);
  }
}

void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum, uint extra_indent) const {
  LogTarget(Trace, gc, phases) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    log_phase(phase, 3 + extra_indent, &ls, print_sum);
  }
}

#define TIME_FORMAT "%.1lfms"

void G1GCPhaseTimes::info_time(const char* name, double value) const {
  log_info(gc, phases)("%s%s: " TIME_FORMAT, indent(1), name, value);
}

void G1GCPhaseTimes::debug_time(const char* name, double value) const {
  log_debug(gc, phases)("%s%s: " TIME_FORMAT, indent(2), name, value);
}

void G1GCPhaseTimes::debug_time_for_reference(const char* name, double value) const {
  LogTarget(Debug, gc, phases) lt;
  LogTarget(Debug, gc, phases, ref) lt2;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s%s: " TIME_FORMAT, indent(2), name, value);
  } else if (lt2.is_enabled()) {
    LogStream ls(lt2);
    ls.print_cr("%s%s: " TIME_FORMAT, indent(2), name, value);
  }
}

void G1GCPhaseTimes::trace_time(const char* name, double value) const {
  log_trace(gc, phases)("%s%s: " TIME_FORMAT, indent(3), name, value);
}

void G1GCPhaseTimes::trace_count(const char* name, size_t value) const {
  log_trace(gc, phases)("%s%s: " SIZE_FORMAT, indent(3), name, value);
}

double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
  const double sum_ms = _root_region_scan_wait_time_ms +
                        _cur_prepare_tlab_time_ms +
                        _cur_concatenate_dirty_card_logs_time_ms +
                        _recorded_young_cset_choice_time_ms +
                        _recorded_non_young_cset_choice_time_ms +
                        _cur_region_register_time +
                        _recorded_prepare_heap_roots_time_ms +
                        _recorded_clear_claimed_marks_time_ms;

  info_time("Pre Evacuate Collection Set", sum_ms);

  if (_root_region_scan_wait_time_ms > 0.0) {
    debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  }
  debug_time("Prepare TLABs", _cur_prepare_tlab_time_ms);
  debug_time("Concatenate Dirty Card Logs", _cur_concatenate_dirty_card_logs_time_ms);
  debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
  debug_time("Region Register", _cur_region_register_time);
  if (G1EagerReclaimHumongousObjects) {
    trace_count("Humongous Total", _cur_fast_reclaim_humongous_total);
    trace_count("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
  }

  debug_time("Prepare Heap Roots", _recorded_prepare_heap_roots_time_ms);
  if (_recorded_clear_claimed_marks_time_ms > 0.0) {
    debug_time("Clear Claimed Marks", _recorded_clear_claimed_marks_time_ms);
  }
  return sum_ms;
}

double G1GCPhaseTimes::print_evacuate_optional_collection_set() const {
  const double sum_ms = _cur_optional_evac_time_ms + _cur_optional_merge_heap_roots_time_ms;
  if (sum_ms > 0) {
    info_time("Merge Optional Heap Roots", _cur_optional_merge_heap_roots_time_ms);

    debug_time("Prepare Optional Merge Heap Roots", _cur_optional_prepare_merge_heap_roots_time_ms);
    debug_phase(_gc_par_phases[OptMergeRS]);

    info_time("Evacuate Optional Collection Set", _cur_optional_evac_time_ms);
    debug_phase(_gc_par_phases[OptScanHR]);
    debug_phase(_gc_par_phases[OptObjCopy]);
    debug_phase(_gc_par_phases[OptCodeRoots]);
    debug_phase(_gc_par_phases[OptTermination]);
  }
  return sum_ms;
}

double G1GCPhaseTimes::print_evacuate_initial_collection_set() const {
  info_time("Merge Heap Roots", _cur_merge_heap_roots_time_ms);

  debug_time("Prepare Merge Heap Roots", _cur_prepare_merge_heap_roots_time_ms);
  debug_phase(_gc_par_phases[MergeER]);
  debug_phase(_gc_par_phases[MergeRS]);
  if (G1HotCardCache::default_use_cache()) {
    debug_phase(_gc_par_phases[MergeHCC]);
  }
  debug_phase(_gc_par_phases[MergeLB]);

  info_time("Evacuate Collection Set", _cur_collection_initial_evac_time_ms);

  trace_phase(_gc_par_phases[GCWorkerStart], false);
  debug_phase(_gc_par_phases[ExtRootScan]);
  for (int i = ExtRootScanSubPhasesFirst; i <= ExtRootScanSubPhasesLast; i++) {
    trace_phase(_gc_par_phases[i]);
  }
  debug_phase(_gc_par_phases[ScanHR]);
  debug_phase(_gc_par_phases[CodeRoots]);
  debug_phase(_gc_par_phases[ObjCopy]);
  debug_phase(_gc_par_phases[Termination]);
  debug_phase(_gc_par_phases[Other]);
  debug_phase(_gc_par_phases[GCWorkerTotal]);
  trace_phase(_gc_par_phases[GCWorkerEnd], false);

  return _cur_collection_initial_evac_time_ms + _cur_merge_heap_roots_time_ms;
}

double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
  const double evac_fail_handling = _cur_evac_fail_recalc_used +
                                    _cur_evac_fail_remove_self_forwards;
  assert(_gc_par_phases[MergePSS]->get(0) != WorkerDataArray<double>::uninitialized(), "must be set");
  const double merge_pss = _gc_par_phases[MergePSS]->get(0) * MILLIUNITS;
  const double sum_ms = evac_fail_handling +
                        _cur_collection_code_root_fixup_time_ms +
                        _recorded_preserve_cm_referents_time_ms +
                        _cur_ref_proc_time_ms +
                        (_weak_phase_times.total_time_sec() * MILLIUNITS) +
                        _cur_clear_ct_time_ms +
                        merge_pss +
                        _cur_strong_code_root_purge_time_ms +
                        _recorded_redirty_logged_cards_time_ms +
                        _recorded_total_free_cset_time_ms +
                        _recorded_total_rebuild_freelist_time_ms +
                        _cur_fast_reclaim_humongous_time_ms +
                        _cur_expand_heap_time_ms +
                        _cur_string_deduplication_time_ms;

  info_time("Post Evacuate Collection Set", sum_ms);

  debug_time("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);

  debug_time("Clear Card Table", _cur_clear_ct_time_ms);

  debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
  _ref_phase_times.print_all_references(2, false);
  _weak_phase_times.log_print(2);

  if (G1StringDedup::is_enabled()) {
    debug_time("String Deduplication", _cur_string_deduplication_time_ms);
    debug_phase(_gc_par_phases[StringDedupQueueFixup], 1);
    debug_phase(_gc_par_phases[StringDedupTableFixup], 1);
  }

  if (G1CollectedHeap::heap()->evacuation_failed()) {
    debug_time("Evacuation Failure", evac_fail_handling);
    trace_time("Recalculate Used", _cur_evac_fail_recalc_used);
    trace_time("Remove Self Forwards", _cur_evac_fail_remove_self_forwards);
  }

  debug_phase(_gc_par_phases[MergePSS], 0);
  debug_time("Code Roots Purge", _cur_strong_code_root_purge_time_ms);

  debug_time("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
  trace_phase(_gc_par_phases[RedirtyCards]);
#if COMPILER2_OR_JVMCI
  debug_time("DerivedPointerTable Update", _cur_derived_pointer_table_update_time_ms);
#endif

  debug_time("Free Collection Set", _recorded_total_free_cset_time_ms);
  trace_time("Serial Free Collection Set", _recorded_serial_free_cset_time_ms);
  trace_phase(_gc_par_phases[ParFreeCSet]);
  trace_phase(_gc_par_phases[YoungFreeCSet], true, 1);
  trace_phase(_gc_par_phases[NonYoungFreeCSet], true, 1);

  debug_time("Rebuild Free List", _recorded_total_rebuild_freelist_time_ms);
  trace_time("Serial Rebuild Free List ", _recorded_serial_rebuild_freelist_time_ms);
  trace_phase(_gc_par_phases[RebuildFreeList]);

  if (G1EagerReclaimHumongousObjects) {
    debug_time("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
    trace_count("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
  }
  debug_time("Start New Collection Set", _recorded_start_new_cset_time_ms);
  if (UseTLAB && ResizeTLAB) {
debug_time("Resize TLABs", _cur_resize_tlab_time_ms); 532 } 533 debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms); 534 535 536 return sum_ms; 537 } 538 539 void G1GCPhaseTimes::print_other(double accounted_ms) const { 540 info_time("Other", _gc_pause_time_ms - accounted_ms); 541 } 542 543 void G1GCPhaseTimes::print() { 544 note_gc_end(); 545 546 if (_cur_verify_before_time_ms > 0.0) { 547 debug_time("Verify Before", _cur_verify_before_time_ms); 548 } 549 550 double accounted_ms = 0.0; 551 accounted_ms += print_pre_evacuate_collection_set(); 552 accounted_ms += print_evacuate_initial_collection_set(); 553 accounted_ms += print_evacuate_optional_collection_set(); 554 accounted_ms += print_post_evacuate_collection_set(); 555 print_other(accounted_ms); 556 557 if (_cur_verify_after_time_ms > 0.0) { 558 debug_time("Verify After", _cur_verify_after_time_ms); 559 } 560 } 561 562 const char* G1GCPhaseTimes::phase_name(GCParPhases phase) { 563 G1GCPhaseTimes* phase_times = G1CollectedHeap::heap()->phase_times(); 564 return phase_times->_gc_par_phases[phase]->short_name(); 565 } 566 567 G1EvacPhaseWithTrimTimeTracker::G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time) : 568 _pss(pss), 569 _start(Ticks::now()), 570 _total_time(total_time), 571 _trim_time(trim_time), 572 _stopped(false) { 573 574 assert(_pss->trim_ticks().value() == 0, "Possibly remaining trim ticks left over from previous use"); 575 } 576 577 G1EvacPhaseWithTrimTimeTracker::~G1EvacPhaseWithTrimTimeTracker() { 578 if (!_stopped) { 579 stop(); 580 } 581 } 582 583 void G1EvacPhaseWithTrimTimeTracker::stop() { 584 assert(!_stopped, "Should only be called once"); 585 _total_time += (Ticks::now() - _start) - _pss->trim_ticks(); 586 _trim_time += _pss->trim_ticks(); 587 _pss->reset_trim_ticks(); 588 _stopped = true; 589 } 590 591 G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id, bool must_record) : 592 _start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id), _event(), _must_record(must_record) { 593 if (_phase_times != NULL) { 594 _start_time = Ticks::now(); 595 } 596 } 597 598 G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() { 599 if (_phase_times != NULL) { 600 if (_must_record) { 601 _phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds()); 602 } else { 603 _phase_times->record_or_add_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds()); 604 } 605 _event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_phase)); 606 } 607 } 608 609 G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times, 610 G1ParScanThreadState* pss, 611 G1GCPhaseTimes::GCParPhases phase, 612 uint worker_id) : 613 G1GCParPhaseTimesTracker(phase_times, phase, worker_id), 614 _total_time(), 615 _trim_time(), 616 _trim_tracker(pss, _total_time, _trim_time) { 617 } 618 619 G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() { 620 if (_phase_times != NULL) { 621 // Explicitly stop the trim tracker since it's not yet destructed. 622 _trim_tracker.stop(); 623 // Exclude trim time by increasing the start time. 624 _start_time += _trim_time; 625 _phase_times->record_or_add_time_secs(G1GCPhaseTimes::ObjCopy, _worker_id, _trim_time.seconds()); 626 } 627 } 628 --- EOF ---