/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/os.hpp"
#include "runtime/timer.hpp"
#include "utilities/macros.hpp"

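// Indentation prefixes used when printing the nested phase times.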
static const char* Indents[5] = {"", "  ", "    ", "      ", "        "};

G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
  _max_gc_threads(max_gc_threads),
  _gc_start_counter(0),
  _gc_pause_time_ms(0.0),
  _ref_phase_times(gc_timer, max_gc_threads),
  _weak_phase_times(max_gc_threads)
{
  assert(max_gc_threads > 0, "Must have some GC threads");

  _gc_par_phases[GCWorkerStart] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Start (ms):");
  _gc_par_phases[ExtRootScan] = new WorkerDataArray<double>(max_gc_threads, "Ext Root Scanning (ms):");

  // Root scanning phases
  _gc_par_phases[ThreadRoots] = new WorkerDataArray<double>(max_gc_threads, "Thread Roots (ms):");
  _gc_par_phases[StringTableRoots] = new WorkerDataArray<double>(max_gc_threads, "StringTable Roots (ms):");
  _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
  _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
  _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
  _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
  _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
  _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
  _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
  _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
  _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
  _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
  _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms):");

  _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms):");
  if (G1HotCardCache::default_use_cache()) {
    _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC (ms):");
  } else {
    _gc_par_phases[ScanHCC] = NULL;
  }
  _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
  _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
#if INCLUDE_AOT
  _gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scanning (ms):");
#endif
  _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
  _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
  _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total (ms):");
  _gc_par_phases[GCWorkerEnd] = new WorkerDataArray<double>(max_gc_threads, "GC Worker End (ms):");
  _gc_par_phases[Other] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Other (ms):");

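  // Per-worker work item counters, linked to the phases whose workers produce them.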
  _scan_rs_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
  _gc_par_phases[ScanRS]->link_thread_work_items(_scan_rs_scanned_cards, ScanRSScannedCards);
  _scan_rs_claimed_cards = new WorkerDataArray<size_t>(max_gc_threads, "Claimed Cards:");
  _gc_par_phases[ScanRS]->link_thread_work_items(_scan_rs_claimed_cards, ScanRSClaimedCards);
  _scan_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
  _gc_par_phases[ScanRS]->link_thread_work_items(_scan_rs_skipped_cards, ScanRSSkippedCards);

  _update_rs_processed_buffers = new WorkerDataArray<size_t>(max_gc_threads, "Processed Buffers:");
  _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_processed_buffers, UpdateRSProcessedBuffers);
  _update_rs_scanned_cards = new WorkerDataArray<size_t>(max_gc_threads, "Scanned Cards:");
  _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_scanned_cards, UpdateRSScannedCards);
  _update_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
  _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_skipped_cards, UpdateRSSkippedCards);

  _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
  _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);

  if (UseStringDeduplication) {
    _gc_par_phases[StringDedupQueueFixup] = new WorkerDataArray<double>(max_gc_threads, "Queue Fixup (ms):");
    _gc_par_phases[StringDedupTableFixup] = new WorkerDataArray<double>(max_gc_threads, "Table Fixup (ms):");
  } else {
    _gc_par_phases[StringDedupQueueFixup] = NULL;
    _gc_par_phases[StringDedupTableFixup] = NULL;
  }

  _gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty (ms):");
  _redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards:");
  _gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);

  _gc_par_phases[YoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Young Free Collection Set (ms):");
  _gc_par_phases[NonYoungFreeCSet] = new WorkerDataArray<double>(max_gc_threads, "Non-Young Free Collection Set (ms):");

  reset();
}

void G1GCPhaseTimes::reset() {
  _cur_collection_par_time_ms = 0.0;
  _cur_collection_code_root_fixup_time_ms = 0.0;
  _cur_strong_code_root_purge_time_ms = 0.0;
  _cur_evac_fail_recalc_used = 0.0;
  _cur_evac_fail_remove_self_forwards = 0.0;
  _cur_string_dedup_fixup_time_ms = 0.0;
  _cur_prepare_tlab_time_ms = 0.0;
  _cur_resize_tlab_time_ms = 0.0;
  _cur_derived_pointer_table_update_time_ms = 0.0;
  _cur_clear_ct_time_ms = 0.0;
  _cur_expand_heap_time_ms = 0.0;
  _cur_ref_proc_time_ms = 0.0;
  _cur_collection_start_sec = 0.0;
  _root_region_scan_wait_time_ms = 0.0;
  _external_accounted_time_ms = 0.0;
  _recorded_clear_claimed_marks_time_ms = 0.0;
  _recorded_young_cset_choice_time_ms = 0.0;
  _recorded_non_young_cset_choice_time_ms = 0.0;
  _recorded_redirty_logged_cards_time_ms = 0.0;
  _recorded_preserve_cm_referents_time_ms = 0.0;
  _recorded_merge_pss_time_ms = 0.0;
  _recorded_start_new_cset_time_ms = 0.0;
  _recorded_total_free_cset_time_ms = 0.0;
  _recorded_serial_free_cset_time_ms = 0.0;
  _cur_fast_reclaim_humongous_time_ms = 0.0;
  _cur_fast_reclaim_humongous_register_time_ms = 0.0;
  _cur_fast_reclaim_humongous_total = 0;
  _cur_fast_reclaim_humongous_candidates = 0;
  _cur_fast_reclaim_humongous_reclaimed = 0;
  _cur_verify_before_time_ms = 0.0;
  _cur_verify_after_time_ms = 0.0;

  for (int i = 0; i < GCParPhasesSentinel; i++) {
    if (_gc_par_phases[i] != NULL) {
      _gc_par_phases[i]->reset();
    }
  }

  _ref_phase_times.reset();
  _weak_phase_times.reset();
}

void G1GCPhaseTimes::note_gc_start() {
  _gc_start_counter = os::elapsed_counter();
  reset();
}

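// Asserts that the given phase was never reported for worker i; used to check the
// slots belonging to workers that were never started.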
#define ASSERT_PHASE_UNINITIALIZED(phase) \
    assert(_gc_par_phases[phase] == NULL || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");

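// Returns the time recorded for a phase by the given worker, or 0.0 if the phase
// is not tracked or the worker never reported a value.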
double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) {
  if (_gc_par_phases[phase] == NULL) {
    return 0.0;
  }
  double value = _gc_par_phases[phase]->get(worker);
  if (value != WorkerDataArray<double>::uninitialized()) {
    return value;
  }
  return 0.0;
}

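// Called when the pause is done: records the total pause time and, for every worker
// that ran, derives its total time and the "Other" time not covered by the
// individually tracked phases.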
void G1GCPhaseTimes::note_gc_end() {
  _gc_pause_time_ms = TimeHelper::counter_to_millis(os::elapsed_counter() - _gc_start_counter);

  double uninitialized = WorkerDataArray<double>::uninitialized();

  for (uint i = 0; i < _max_gc_threads; i++) {
    double worker_start = _gc_par_phases[GCWorkerStart]->get(i);
    if (worker_start != uninitialized) {
      assert(_gc_par_phases[GCWorkerEnd]->get(i) != uninitialized, "Worker started but not ended.");
      double total_worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
      record_time_secs(GCWorkerTotal, i, total_worker_time);

      double worker_known_time = worker_time(ExtRootScan, i) +
                                 worker_time(ScanHCC, i) +
                                 worker_time(UpdateRS, i) +
                                 worker_time(ScanRS, i) +
                                 worker_time(CodeRoots, i) +
                                 worker_time(ObjCopy, i) +
                                 worker_time(Termination, i);

      record_time_secs(Other, i, total_worker_time - worker_known_time);
    } else {
      // Make sure all slots are uninitialized since this thread did not seem to have been started
      ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd);
      ASSERT_PHASE_UNINITIALIZED(ExtRootScan);
      ASSERT_PHASE_UNINITIALIZED(ScanHCC);
      ASSERT_PHASE_UNINITIALIZED(UpdateRS);
      ASSERT_PHASE_UNINITIALIZED(ScanRS);
      ASSERT_PHASE_UNINITIALIZED(CodeRoots);
      ASSERT_PHASE_UNINITIALIZED(ObjCopy);
      ASSERT_PHASE_UNINITIALIZED(Termination);
    }
  }
}

#undef ASSERT_PHASE_UNINITIALIZED

// record the time a phase took in seconds
void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->set(worker_i, secs);
}

// add a number of seconds to a phase
void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
  _gc_par_phases[phase]->add(worker_i, secs);
}

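// Records the Object Copy time for the worker, or adds to it if a value has
// already been recorded.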
void G1GCPhaseTimes::record_or_add_objcopy_time_secs(uint worker_i, double secs) {
  if (_gc_par_phases[ObjCopy]->get(worker_i) == _gc_par_phases[ObjCopy]->uninitialized()) {
    record_time_secs(ObjCopy, worker_i, secs);
  } else {
    add_time_secs(ObjCopy, worker_i, secs);
  }
}

void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
  _gc_par_phases[phase]->set_thread_work_item(worker_i, count, index);
}

// return the average time for a phase in milliseconds
double G1GCPhaseTimes::average_time_ms(GCParPhases phase) {
  return _gc_par_phases[phase]->average() * 1000.0;
}

size_t G1GCPhaseTimes::sum_thread_work_items(GCParPhases phase, uint index) {
  assert(_gc_par_phases[phase]->thread_work_items(index) != NULL, "No sub count");
  return _gc_par_phases[phase]->thread_work_items(index)->sum();
}

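// Prints the per-worker details of a phase or work item array at trace level.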
template <class T>
void G1GCPhaseTimes::details(T* phase, const char* indent) const {
  LogTarget(Trace, gc, phases, task) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print("%s", indent);
    phase->print_details_on(&ls);
  }
}

void G1GCPhaseTimes::log_phase(WorkerDataArray<double>* phase, uint indent, outputStream* out, bool print_sum) const {
  out->print("%s", Indents[indent]);
  phase->print_summary_on(out, print_sum);
  details(phase, Indents[indent]);

  for (uint i = 0; i < phase->MaxThreadWorkItems; i++) {
    WorkerDataArray<size_t>* work_items = phase->thread_work_items(i);
    if (work_items != NULL) {
      out->print("%s", Indents[indent + 1]);
      work_items->print_summary_on(out, true);
      details(work_items, Indents[indent + 1]);
    }
  }
}

void G1GCPhaseTimes::debug_phase(WorkerDataArray<double>* phase) const {
  LogTarget(Debug, gc, phases) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_phase(phase, 2, &ls, true);
  }
}

void G1GCPhaseTimes::trace_phase(WorkerDataArray<double>* phase, bool print_sum) const {
  LogTarget(Trace, gc, phases) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    log_phase(phase, 3, &ls, print_sum);
  }
}

#define TIME_FORMAT "%.1lfms"

void G1GCPhaseTimes::info_time(const char* name, double value) const {
  log_info(gc, phases)("%s%s: " TIME_FORMAT, Indents[1], name, value);
}

void G1GCPhaseTimes::debug_time(const char* name, double value) const {
  log_debug(gc, phases)("%s%s: " TIME_FORMAT, Indents[2], name, value);
}

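// Prints the reference processing time on gc+phases at debug level if that is enabled,
// otherwise falls back to gc+phases+ref, so the line is not emitted twice.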
void G1GCPhaseTimes::debug_time_for_reference(const char* name, double value) const {
  LogTarget(Debug, gc, phases) lt;
  LogTarget(Debug, gc, phases, ref) lt2;

  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
  } else if (lt2.is_enabled()) {
    LogStream ls(lt2);
    ls.print_cr("%s%s: " TIME_FORMAT, Indents[2], name, value);
  }
}

void G1GCPhaseTimes::trace_time(const char* name, double value) const {
  log_trace(gc, phases)("%s%s: " TIME_FORMAT, Indents[3], name, value);
}

void G1GCPhaseTimes::trace_count(const char* name, size_t value) const {
  log_trace(gc, phases)("%s%s: " SIZE_FORMAT, Indents[3], name, value);
}

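// The print_* functions below each log one top-level part of the pause and return
// the number of milliseconds they account for, so that print_other() can report the
// remaining, unaccounted time.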
double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
  const double sum_ms = _root_region_scan_wait_time_ms +
                        _recorded_young_cset_choice_time_ms +
                        _recorded_non_young_cset_choice_time_ms +
                        _cur_fast_reclaim_humongous_register_time_ms +
                        _recorded_clear_claimed_marks_time_ms;

  info_time("Pre Evacuate Collection Set", sum_ms);

  if (_root_region_scan_wait_time_ms > 0.0) {
    debug_time("Root Region Scan Waiting", _root_region_scan_wait_time_ms);
  }
  debug_time("Prepare TLABs", _cur_prepare_tlab_time_ms);
  debug_time("Choose Collection Set", (_recorded_young_cset_choice_time_ms + _recorded_non_young_cset_choice_time_ms));
  if (G1EagerReclaimHumongousObjects) {
    debug_time("Humongous Register", _cur_fast_reclaim_humongous_register_time_ms);
    trace_count("Humongous Total", _cur_fast_reclaim_humongous_total);
    trace_count("Humongous Candidate", _cur_fast_reclaim_humongous_candidates);
  }

  if (_recorded_clear_claimed_marks_time_ms > 0.0) {
    debug_time("Clear Claimed Marks", _recorded_clear_claimed_marks_time_ms);
  }
  return sum_ms;
}

350 
351 double G1GCPhaseTimes::print_evacuate_collection_set() const {
352   const double sum_ms = _cur_collection_par_time_ms;
353 
354   info_time("Evacuate Collection Set", sum_ms);
355 
356   trace_phase(_gc_par_phases[GCWorkerStart], false);
357   debug_phase(_gc_par_phases[ExtRootScan]);
358   for (int i = ThreadRoots; i <= SATBFiltering; i++) {
359     trace_phase(_gc_par_phases[i]);
360   }
361   debug_phase(_gc_par_phases[UpdateRS]);
362   if (G1HotCardCache::default_use_cache()) {
363     trace_phase(_gc_par_phases[ScanHCC]);
364   }
365   debug_phase(_gc_par_phases[ScanRS]);
366   debug_phase(_gc_par_phases[CodeRoots]);
367 #if INCLUDE_AOT
368   debug_phase(_gc_par_phases[AOTCodeRoots]);
369 #endif
370   debug_phase(_gc_par_phases[ObjCopy]);
371   debug_phase(_gc_par_phases[Termination]);
372   debug_phase(_gc_par_phases[Other]);
373   debug_phase(_gc_par_phases[GCWorkerTotal]);
374   trace_phase(_gc_par_phases[GCWorkerEnd], false);
375 
376   return sum_ms;
377 }
378 
379 double G1GCPhaseTimes::print_post_evacuate_collection_set() const {
380   const double evac_fail_handling = _cur_evac_fail_recalc_used +
381                                     _cur_evac_fail_remove_self_forwards;
382   const double sum_ms = evac_fail_handling +
383                         _cur_collection_code_root_fixup_time_ms +
384                         _recorded_preserve_cm_referents_time_ms +
385                         _cur_ref_proc_time_ms +
386                         (_weak_phase_times.total_time_sec() * MILLIUNITS) +
387                         _cur_clear_ct_time_ms +
388                         _recorded_merge_pss_time_ms +
389                         _cur_strong_code_root_purge_time_ms +
390                         _recorded_redirty_logged_cards_time_ms +
391                         _recorded_total_free_cset_time_ms +
392                         _cur_fast_reclaim_humongous_time_ms +
393                         _cur_expand_heap_time_ms +
394                         _cur_string_dedup_fixup_time_ms;
395 
396   info_time("Post Evacuate Collection Set", sum_ms);
397 
398   debug_time("Code Roots Fixup", _cur_collection_code_root_fixup_time_ms);
399 
400   debug_time("Clear Card Table", _cur_clear_ct_time_ms);
401 
402   debug_time_for_reference("Reference Processing", _cur_ref_proc_time_ms);
403   _ref_phase_times.print_all_references(2, false);
404   _weak_phase_times.log_print(2);
405 
406   if (G1StringDedup::is_enabled()) {
407     debug_time("String Dedup Fixup", _cur_string_dedup_fixup_time_ms);
408     debug_phase(_gc_par_phases[StringDedupQueueFixup]);
409     debug_phase(_gc_par_phases[StringDedupTableFixup]);
410   }
411 
412   if (G1CollectedHeap::heap()->evacuation_failed()) {
413     debug_time("Evacuation Failure", evac_fail_handling);
414     trace_time("Recalculate Used", _cur_evac_fail_recalc_used);
415     trace_time("Remove Self Forwards",_cur_evac_fail_remove_self_forwards);
416   }
417 
418   debug_time("Merge Per-Thread State", _recorded_merge_pss_time_ms);
419   debug_time("Code Roots Purge", _cur_strong_code_root_purge_time_ms);
420 
421   debug_time("Redirty Cards", _recorded_redirty_logged_cards_time_ms);
422   trace_phase(_gc_par_phases[RedirtyCards]);
423 #if COMPILER2_OR_JVMCI
424   debug_time("DerivedPointerTable Update", _cur_derived_pointer_table_update_time_ms);
425 #endif
426 
427   debug_time("Free Collection Set", _recorded_total_free_cset_time_ms);
428   trace_time("Free Collection Set Serial", _recorded_serial_free_cset_time_ms);
429   trace_phase(_gc_par_phases[YoungFreeCSet]);
430   trace_phase(_gc_par_phases[NonYoungFreeCSet]);
431 
432   if (G1EagerReclaimHumongousObjects) {
433     debug_time("Humongous Reclaim", _cur_fast_reclaim_humongous_time_ms);
434     trace_count("Humongous Reclaimed", _cur_fast_reclaim_humongous_reclaimed);
435   }
436   debug_time("Start New Collection Set", _recorded_start_new_cset_time_ms);
437   if (UseTLAB && ResizeTLAB) {
438     debug_time("Resize TLABs", _cur_resize_tlab_time_ms);
439   }
440   debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms);
441 
442 
443   return sum_ms;
444 }
445 
446 void G1GCPhaseTimes::print_other(double accounted_ms) const {
447   info_time("Other", _gc_pause_time_ms - accounted_ms);
448 }
449 
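// Logs the complete pause breakdown: optional verification, pre-evacuate, evacuate,
// post-evacuate, and the remaining unaccounted "Other" time.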
void G1GCPhaseTimes::print() {
  note_gc_end();

  if (_cur_verify_before_time_ms > 0.0) {
    debug_time("Verify Before", _cur_verify_before_time_ms);
  }

  double accounted_ms = 0.0;
  accounted_ms += print_pre_evacuate_collection_set();
  accounted_ms += print_evacuate_collection_set();
  accounted_ms += print_post_evacuate_collection_set();
  print_other(accounted_ms);

  if (_cur_verify_after_time_ms > 0.0) {
    debug_time("Verify After", _cur_verify_after_time_ms);
  }
}

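// Measures the time spent in an evacuation phase, accumulating the phase time and the
// task queue trimming time into separate Tickspans; G1EvacPhaseTimesTracker uses this
// to attribute the trim time to Object Copy instead of the tracked phase.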
G1EvacPhaseWithTrimTimeTracker::G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time) :
  _pss(pss),
  _start(Ticks::now()),
  _total_time(total_time),
  _trim_time(trim_time),
  _stopped(false) {

  assert(_pss->trim_ticks().value() == 0, "Possibly remaining trim ticks left over from previous use");
}

G1EvacPhaseWithTrimTimeTracker::~G1EvacPhaseWithTrimTimeTracker() {
  if (!_stopped) {
    stop();
  }
}

void G1EvacPhaseWithTrimTimeTracker::stop() {
  assert(!_stopped, "Should only be called once");
  _total_time += (Ticks::now() - _start) - _pss->trim_ticks();
  _trim_time += _pss->trim_ticks();
  _pss->reset_trim_ticks();
  _stopped = true;
}

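// Scoped tracker that records the elapsed time for a parallel phase on behalf of the
// given worker when it goes out of scope. A NULL phase_times disables tracking.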
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
  _start_time(), _phase(phase), _phase_times(phase_times), _worker_id(worker_id) {
  if (_phase_times != NULL) {
    _start_time = Ticks::now();
  }
}

G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
  if (_phase_times != NULL) {
    _phase_times->record_time_secs(_phase, _worker_id, (Ticks::now() - _start_time).seconds());
  }
}

G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times,
                                                 G1ParScanThreadState* pss,
                                                 G1GCPhaseTimes::GCParPhases phase,
                                                 uint worker_id) :
  G1GCParPhaseTimesTracker(phase_times, phase, worker_id),
  _total_time(),
  _trim_time(),
  _trim_tracker(pss, _total_time, _trim_time) {
}

G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
  if (_phase_times != NULL) {
    // Explicitly stop the trim tracker since it's not yet destructed.
    _trim_tracker.stop();
    // Exclude trim time by increasing the start time.
    _start_time += _trim_time;
    _phase_times->record_or_add_objcopy_time_secs(_worker_id, _trim_time.seconds());
  }
}