/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "runtime/os.hpp"
#include "trace/tracing.hpp"
#include "trace/traceBackend.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "tracefiles/traceEventClasses.hpp"
#endif

// All GC dependencies against the trace framework are contained within this file.

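// Pointer-valued fields (heap addresses, space boundaries) are sent in event
// payloads as raw unsigned integer values.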
typedef uintptr_t TraceAddress;

void GCTracer::send_garbage_collection_event() const {
  EventGarbageCollection event(UNTIMED);
  if (event.should_commit()) {
    event.set_gcId(_shared_gc_info.gc_id().id());
    event.set_name(_shared_gc_info.name());
    event.set_cause((u2) _shared_gc_info.cause());
    event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
    event.set_longestPause(_shared_gc_info.longest_pause());
    event.set_starttime(_shared_gc_info.start_timestamp());
    event.set_endtime(_shared_gc_info.end_timestamp());
    event.commit();
  }
}

void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
  EventGCReferenceStatistics e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_type((u1)type);
    e.set_count(count);
    e.commit();
  }
}

void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype,
                                                      const MetaspaceChunkFreeListSummary& summary) const {
  EventMetaspaceChunkFreeListSummary e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_when(when);
    e.set_metadataType(mdtype);

    e.set_specializedChunks(summary.num_specialized_chunks());
    e.set_specializedChunksTotalSize(summary.specialized_chunks_size_in_bytes());

    e.set_smallChunks(summary.num_small_chunks());
    e.set_smallChunksTotalSize(summary.small_chunks_size_in_bytes());

    e.set_mediumChunks(summary.num_medium_chunks());
    e.set_mediumChunksTotalSize(summary.medium_chunks_size_in_bytes());

    e.set_humongousChunks(summary.num_humongous_chunks());
    e.set_humongousChunksTotalSize(summary.humongous_chunks_size_in_bytes());

    e.commit();
  }
}

void ParallelOldTracer::send_parallel_old_event() const {
  EventParallelOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

void YoungGCTracer::send_young_gc_event() const {
  EventYoungGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_tenuringThreshold(_tenuring_threshold);
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

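// These let callers check up front whether the promotion events are enabled,
// e.g. to avoid gathering per-object details (size, age, PLAB size) unnecessarily.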
bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
  return EventPromoteObjectInNewPLAB::is_enabled();
}

bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
  return EventPromoteObjectOutsidePLAB::is_enabled();
}

void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                     uint age, bool tenured,
                                                     size_t plab_size) const {

  EventPromoteObjectInNewPLAB event;
  if (event.should_commit()) {
    event.set_gcId(_shared_gc_info.gc_id().id());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.set_plabSize(plab_size);
    event.commit();
  }
}

void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                      uint age, bool tenured) const {

  EventPromoteObjectOutsidePLAB event;
  if (event.should_commit()) {
    event.set_gcId(_shared_gc_info.gc_id().id());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.commit();
  }
}

void OldGCTracer::send_old_gc_event() const {
  EventOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

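// Converts the collected copy-failure statistics into the trace struct shared
// by the promotion-failed and evacuation-failed events below.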
static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
  TraceStructCopyFailed failed_info;
  failed_info.set_objectCount(cf_info.failed_count());
  failed_info.set_firstSize(cf_info.first_size());
  failed_info.set_smallestSize(cf_info.smallest_size());
  failed_info.set_totalSize(cf_info.total_size());
  return failed_info;
}

void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
  EventPromotionFailed e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_promotionFailed(to_trace_struct(pf_info));
    e.set_thread(pf_info.thread()->thread_id());
    e.commit();
  }
}

// Common to CMS and G1
void OldGCTracer::send_concurrent_mode_failure_event() {
  EventConcurrentModeFailure e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.commit();
  }
}

#if INCLUDE_ALL_GCS
void G1NewTracer::send_g1_young_gc_event() {
  EventG1GarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_type(_g1_young_gc_info.type());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

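// MMU events can be reported from either context: the gc id is taken from the
// concurrent-mark tracer when sent from a GC (marking) thread, and from the
// stop-the-world tracer otherwise.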
void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms, bool gc_thread) {
  EventG1MMU e;
  if (e.should_commit()) {
    if (gc_thread) {
      e.set_gcId(G1CollectedHeap::heap()->gc_tracer_cm()->gc_id().id());
    } else {
      e.set_gcId(G1CollectedHeap::heap()->gc_tracer_stw()->gc_id().id());
    }
    e.set_timeSlice(time_slice_ms);
    e.set_gcTime(gc_time_ms);
    e.set_pauseTarget(max_time_ms);
    e.commit();
  }
}

void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
  EventEvacuationInformation e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_cSetRegions(info->collectionset_regions());
    e.set_cSetUsedBefore(info->collectionset_used_before());
    e.set_cSetUsedAfter(info->collectionset_used_after());
    e.set_allocationRegions(info->allocation_regions());
    e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before());
    e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
    e.set_bytesCopied(info->bytes_copied());
    e.set_regionsFreed(info->regions_freed());
    e.commit();
  }
}

void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
  EventEvacuationFailed e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_evacuationFailed(to_trace_struct(ef_info));
    e.commit();
  }
}

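// Durations are scaled by MILLIUNITS for the event payload; the percentage and
// rate fields guard against division by zero when no data has been collected.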
void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
                                             size_t target_occupancy,
                                             size_t current_occupancy,
                                             size_t last_allocation_size,
                                             double last_allocation_duration,
                                             double last_marking_length) {
  EventG1BasicIHOP evt;
  if (evt.should_commit()) {
    evt.set_gcId(_shared_gc_info.gc_id().id());
    evt.set_threshold(threshold);
    evt.set_targetOccupancy(target_occupancy);
    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
    evt.set_currentOccupancy(current_occupancy);
    evt.set_recentMutatorAllocationSize(last_allocation_size);
    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
    evt.commit();
  }
}
#endif

static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
  TraceStructVirtualSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_committedEnd((TraceAddress)summary.committed_end());
  space.set_committedSize(summary.committed_size());
  space.set_reservedEnd((TraceAddress)summary.reserved_end());
  space.set_reservedSize(summary.reserved_size());
  return space;
}

static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
  TraceStructObjectSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_end((TraceAddress)summary.end());
  space.set_used(summary.used());
  space.set_size(summary.size());
  return space;
}

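// Visitor that sends heap summary events matching the concrete heap type: a
// generic GCHeapSummary event, plus G1- or ParallelScavenge-specific details.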
class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
  GCId _gc_id;
  GCWhen::Type _when;
 public:
  GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {}

  void visit(const GCHeapSummary* heap_summary) const {
    const VirtualSpaceSummary& heap_space = heap_summary->heap();

    EventGCHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(_gc_id.id());
      e.set_when((u1)_when);
      e.set_heapSpace(to_trace_struct(heap_space));
      e.set_heapUsed(heap_summary->used());
      e.commit();
    }
  }

  void visit(const G1HeapSummary* g1_heap_summary) const {
    visit((GCHeapSummary*)g1_heap_summary);
    EventG1HeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(_gc_id.id());
      e.set_when((u1)_when);
      e.set_edenUsedSize(g1_heap_summary->edenUsed());
      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
      e.commit();
    }
  }

  void visit(const PSHeapSummary* ps_heap_summary) const {
    visit((GCHeapSummary*)ps_heap_summary);

    const VirtualSpaceSummary& old_summary = ps_heap_summary->old();
    const SpaceSummary& old_space = ps_heap_summary->old_space();
    const VirtualSpaceSummary& young_summary = ps_heap_summary->young();
    const SpaceSummary& eden_space = ps_heap_summary->eden();
    const SpaceSummary& from_space = ps_heap_summary->from();
    const SpaceSummary& to_space = ps_heap_summary->to();

    EventPSHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(_gc_id.id());
      e.set_when((u1)_when);

      e.set_oldSpace(to_trace_struct(old_summary));
      e.set_oldObjectSpace(to_trace_struct(old_space));
      e.set_youngSpace(to_trace_struct(young_summary));
      e.set_edenSpace(to_trace_struct(eden_space));
      e.set_fromSpace(to_trace_struct(from_space));
      e.set_toSpace(to_trace_struct(to_space));
      e.commit();
    }
  }
};

void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
  GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when);
  heap_summary.accept(&visitor);
}

static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
  TraceStructMetaspaceSizes meta_sizes;

  meta_sizes.set_committed(sizes.committed());
  meta_sizes.set_used(sizes.used());
  meta_sizes.set_reserved(sizes.reserved());

  return meta_sizes;
}

void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
  EventMetaspaceSummary e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_when((u1) when);
    e.set_gcThreshold(meta_space_summary.capacity_until_GC());
    e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
    e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
    e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
    e.commit();
  }
}

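// Visitor that reports pause phases; the phase nesting level selects the event
// type (EventGCPhasePause at the top level, EventGCPhasePauseLevel1-3 for
// nested levels, deeper levels are not sent).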
class PhaseSender : public PhaseVisitor {
  GCId _gc_id;
 public:
  PhaseSender(GCId gc_id) : _gc_id(gc_id) {}

  template<typename T>
  void send_phase(PausePhase* pause) {
    T event(UNTIMED);
    if (event.should_commit()) {
      event.set_gcId(_gc_id.id());
      event.set_name(pause->name());
      event.set_starttime(pause->start());
      event.set_endtime(pause->end());
      event.commit();
    }
  }

  void visit(GCPhase* pause) { ShouldNotReachHere(); }
  void visit(ConcurrentPhase* pause) { Unimplemented(); }
  void visit(PausePhase* pause) {
    assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types");

    switch (pause->level()) {
      case 0: send_phase<EventGCPhasePause>(pause); break;
      case 1: send_phase<EventGCPhasePauseLevel1>(pause); break;
      case 2: send_phase<EventGCPhasePauseLevel2>(pause); break;
      case 3: send_phase<EventGCPhasePauseLevel3>(pause); break;
      default: /* Ignore sending this phase */ break;
    }
  }
};

void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
  PhaseSender phase_reporter(_shared_gc_info.gc_id());

  TimePartitionPhasesIterator iter(time_partitions);
  while (iter.has_next()) {
    GCPhase* phase = iter.next();
    phase->accept(&phase_reporter);
  }
}