/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "runtime/os.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#endif

// All GC dependencies against the trace framework are contained within this file.

typedef uintptr_t TraceAddress;

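// Sends the summarizing GarbageCollection event. The event is created UNTIMED
// because its start and end times come from the shared GC info rather than
// being sampled by the event itself; should_commit() is checked first so that
// field conversion is skipped entirely when the event is disabled.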
void GCTracer::send_garbage_collection_event() const {
  EventGarbageCollection event(UNTIMED);
  if (event.should_commit()) {
    event.set_gcId(_shared_gc_info.gc_id().id());
    event.set_name(_shared_gc_info.name());
    event.set_cause((u2) _shared_gc_info.cause());
    event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
    event.set_longestPause(_shared_gc_info.longest_pause());
    event.set_starttime(_shared_gc_info.start_timestamp());
    event.set_endtime(_shared_gc_info.end_timestamp());
    event.commit();
  }
}

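// One GCReferenceStatistics event is sent per reference type, carrying the
// number of references of that type processed during this collection.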
void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
  EventGCReferenceStatistics e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_type((u1)type);
    e.set_count(count);
    e.commit();
  }
}

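// Reports the metaspace chunk free lists for the given metadata type, broken
// down by chunk size class (specialized, small, medium, humongous) as counts
// and total byte sizes.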
void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype,
                                                      const MetaspaceChunkFreeListSummary& summary) const {
  EventMetaspaceChunkFreeListSummary e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_when(when);
    e.set_metadataType(mdtype);

    e.set_specializedChunks(summary.num_specialized_chunks());
    e.set_specializedChunksTotalSize(summary.specialized_chunks_size_in_bytes());

    e.set_smallChunks(summary.num_small_chunks());
    e.set_smallChunksTotalSize(summary.small_chunks_size_in_bytes());

    e.set_mediumChunks(summary.num_medium_chunks());
    e.set_mediumChunksTotalSize(summary.medium_chunks_size_in_bytes());

    e.set_humongousChunks(summary.num_humongous_chunks());
    e.set_humongousChunksTotalSize(summary.humongous_chunks_size_in_bytes());

    e.commit();
  }
}

void ParallelOldTracer::send_parallel_old_event() const {
  EventParallelOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

void YoungGCTracer::send_young_gc_event() const {
  EventYoungGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_tenuringThreshold(_tenuring_threshold);
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

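// Cheap enabled-checks so callers can avoid gathering per-object promotion
// details when the corresponding JFR events are turned off.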
bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
  return EventPromoteObjectInNewPLAB::is_enabled();
}

bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
  return EventPromoteObjectOutsidePLAB::is_enabled();
}

void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
                                                     uint age, bool tenured,
                                                     size_t plab_size) const {

  EventPromoteObjectInNewPLAB event;
  if (event.should_commit()) {
    event.set_gcId(_shared_gc_info.gc_id().id());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.set_plabSize(plab_size);
    event.commit();
  }
}

void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
                                                      uint age, bool tenured) const {

  EventPromoteObjectOutsidePLAB event;
  if (event.should_commit()) {
    event.set_gcId(_shared_gc_info.gc_id().id());
    event.set_objectClass(klass);
    event.set_objectSize(obj_size);
    event.set_tenured(tenured);
    event.set_tenuringAge(age);
    event.commit();
  }
}

void OldGCTracer::send_old_gc_event() const {
  EventOldGarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

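// Converts a CopyFailedInfo into the JFR struct embedded in both the
// PromotionFailed and EvacuationFailed events.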
static JfrStructCopyFailed to_struct(const CopyFailedInfo& cf_info) {
  JfrStructCopyFailed failed_info;
  failed_info.set_objectCount(cf_info.failed_count());
  failed_info.set_firstSize(cf_info.first_size());
  failed_info.set_smallestSize(cf_info.smallest_size());
  failed_info.set_totalSize(cf_info.total_size());
  return failed_info;
}

void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
  EventPromotionFailed e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_promotionFailed(to_struct(pf_info));
    e.set_thread(pf_info.thread()->thread_id());
    e.commit();
  }
}

// Common to CMS and G1
void OldGCTracer::send_concurrent_mode_failure_event() {
  EventConcurrentModeFailure e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.commit();
  }
}

#if INCLUDE_ALL_GCS
void G1NewTracer::send_g1_young_gc_event() {
  EventG1GarbageCollection e(UNTIMED);
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_type(_g1_young_gc_info.type());
    e.set_starttime(_shared_gc_info.start_timestamp());
    e.set_endtime(_shared_gc_info.end_timestamp());
    e.commit();
  }
}

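// The MMU event is not sent from within a GCTracer, so the GC id is peeked
// from the global id counter instead of being read from shared GC info.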
void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) {
  EventG1MMU e;
  if (e.should_commit()) {
    e.set_gcId(GCId::peek().id());
    e.set_timeSlice((s8)time_slice_ms);
    e.set_gcTime((s8)gc_time_ms);
    e.set_pauseTarget((s8)max_time_ms);
    e.commit();
  }
}

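// Note: allocationRegionsUsedAfter is derived here as the allocation regions'
// used bytes before evacuation plus the bytes copied during evacuation.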
void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
  EventEvacuationInformation e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_cSetRegions(info->collectionset_regions());
    e.set_cSetUsedBefore(info->collectionset_used_before());
    e.set_cSetUsedAfter(info->collectionset_used_after());
    e.set_allocationRegions(info->allocation_regions());
    e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before());
    e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
    e.set_bytesCopied(info->bytes_copied());
    e.set_regionsFreed(info->regions_freed());
    e.commit();
  }
}

void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
  EventEvacuationFailed e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_evacuationFailed(to_struct(ef_info));
    e.commit();
  }
}

// XXX: the following G1 statistics senders are currently disabled.
//static JfrStructG1EvacuationStatistics
//create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
//  JfrStructG1EvacuationStatistics s;
//  s.set_gcId(gcid);
//  s.set_allocated(summary.allocated() * HeapWordSize);
//  s.set_wasted(summary.wasted() * HeapWordSize);
//  s.set_used(summary.used() * HeapWordSize);
//  s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
//  s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
//  s.set_regionsRefilled(summary.regions_filled());
//  s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
//  s.set_failureUsed(summary.failure_used() * HeapWordSize);
//  s.set_failureWaste(summary.failure_waste() * HeapWordSize);
//  return s;
//}
//
//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
//  EventG1EvacuationYoungStatistics surv_evt;
//  if (surv_evt.should_commit()) {
//    surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
//    surv_evt.commit();
//  }
//}
//
//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
//  EventG1EvacuationOldStatistics old_evt;
//  if (old_evt.should_commit()) {
//    old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
//    old_evt.commit();
//  }
//}
//
//void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
//                                             size_t target_occupancy,
//                                             size_t current_occupancy,
//                                             size_t last_allocation_size,
//                                             double last_allocation_duration,
//                                             double last_marking_length) {
//  EventG1BasicIHOP evt;
//  if (evt.should_commit()) {
//    evt.set_gcId(_shared_gc_info.gc_id().id());
//    evt.set_threshold(threshold);
//    evt.set_targetOccupancy(target_occupancy);
//    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
//    evt.set_currentOccupancy(current_occupancy);
//    evt.set_recentMutatorAllocationSize(last_allocation_size);
//    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
//    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
//    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
//    evt.commit();
//  }
//}
//
//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
//                                                size_t internal_target_occupancy,
//                                                size_t current_occupancy,
//                                                size_t additional_buffer_size,
//                                                double predicted_allocation_rate,
//                                                double predicted_marking_length,
//                                                bool prediction_active) {
//  EventG1AdaptiveIHOP evt;
//  if (evt.should_commit()) {
//    evt.set_gcId(_shared_gc_info.gc_id().id());
//    evt.set_threshold(threshold);
//    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
//    evt.set_ihopTargetOccupancy(internal_target_occupancy);
//    evt.set_currentOccupancy(current_occupancy);
//    evt.set_additionalBufferSize(additional_buffer_size);
//    evt.set_predictedAllocationRate(predicted_allocation_rate);
//    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
//    evt.set_predictionActive(prediction_active);
//    evt.commit();
//  }
//}

#endif // INCLUDE_ALL_GCS

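// Helpers converting heap summary value objects into the JFR struct types
// embedded in the heap summary events sent below.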
static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
  JfrStructVirtualSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_committedEnd((TraceAddress)summary.committed_end());
  space.set_committedSize(summary.committed_size());
  space.set_reservedEnd((TraceAddress)summary.reserved_end());
  space.set_reservedSize(summary.reserved_size());
  return space;
}

static JfrStructObjectSpace to_struct(const SpaceSummary& summary) {
  JfrStructObjectSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_end((TraceAddress)summary.end());
  space.set_used(summary.used());
  space.set_size(summary.size());
  return space;
}

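// Visitor that sends the heap summary event matching the concrete summary
// type: the generic visit() handles GCHeapSummary, and the PSHeapSummary
// overload adds the ParallelScavenge-specific spaces on top of it.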
class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
  GCId _gc_id;
  GCWhen::Type _when;
 public:
  GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {}

  void visit(const GCHeapSummary* heap_summary) const {
    const VirtualSpaceSummary& heap_space = heap_summary->heap();

    EventGCHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(_gc_id.id());
      e.set_when((u1)_when);
      e.set_heapSpace(to_struct(heap_space));
      e.set_heapUsed(heap_summary->used());
      e.commit();
    }
  }

//  void visit(const G1HeapSummary* g1_heap_summary) const {
//    visit((GCHeapSummary*)g1_heap_summary);
//
//    EventG1HeapSummary e;
//    if (e.should_commit()) {
//      e.set_gcId(_gc_id.id());
//      e.set_when((u1)_when);
//      e.set_edenUsedSize(g1_heap_summary->edenUsed());
//      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
//      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
//      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
//      e.commit();
//    }
//  }

  void visit(const PSHeapSummary* ps_heap_summary) const {
    visit((GCHeapSummary*)ps_heap_summary);

    const VirtualSpaceSummary& old_summary = ps_heap_summary->old();
    const SpaceSummary& old_space = ps_heap_summary->old_space();
    const VirtualSpaceSummary& young_summary = ps_heap_summary->young();
    const SpaceSummary& eden_space = ps_heap_summary->eden();
    const SpaceSummary& from_space = ps_heap_summary->from();
    const SpaceSummary& to_space = ps_heap_summary->to();

    EventPSHeapSummary e;
    if (e.should_commit()) {
      e.set_gcId(_gc_id.id());
      e.set_when((u1)_when);

      e.set_oldSpace(to_struct(old_summary));
      e.set_oldObjectSpace(to_struct(old_space));
      e.set_youngSpace(to_struct(young_summary));
      e.set_edenSpace(to_struct(eden_space));
      e.set_fromSpace(to_struct(from_space));
      e.set_toSpace(to_struct(to_space));
      e.commit();
    }
  }
};

void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
  GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when);
  heap_summary.accept(&visitor);
}

static JfrStructMetaspaceSizes to_struct(const MetaspaceSizes& sizes) {
  JfrStructMetaspaceSizes meta_sizes;

  meta_sizes.set_committed(sizes.committed());
  meta_sizes.set_used(sizes.used());
  meta_sizes.set_reserved(sizes.reserved());

  return meta_sizes;
}

void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
  EventMetaspaceSummary e;
  if (e.should_commit()) {
    e.set_gcId(_shared_gc_info.gc_id().id());
    e.set_when((u1) when);
    e.set_gcThreshold(meta_space_summary.capacity_until_GC());
    e.set_metaspace(to_struct(meta_space_summary.meta_space()));
    e.set_dataSpace(to_struct(meta_space_summary.data_space()));
    e.set_classSpace(to_struct(meta_space_summary.class_space()));
    e.commit();
  }
}

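// Maps a pause phase to the event type matching its nesting level; phases
// nested deeper than the deepest level event are silently dropped.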
class PhaseSender : public PhaseVisitor {
  GCId _gc_id;
 public:
  PhaseSender(GCId gc_id) : _gc_id(gc_id) {}

  template<typename T>
  void send_phase(GCPhase* phase) {
    T event(UNTIMED);
    if (event.should_commit()) {
      event.set_gcId(_gc_id.id());
      event.set_name(phase->name());
      event.set_starttime(phase->start());
      event.set_endtime(phase->end());
      event.commit();
    }
  }

  void visit(GCPhase* pause) { ShouldNotReachHere(); }
  void visit(ConcurrentPhase* pause) { Unimplemented(); }
  void visit(PausePhase* pause) {
    assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types");

    switch (pause->level()) {
      case 0: send_phase<EventGCPhasePause>(pause); break;
      case 1: send_phase<EventGCPhasePauseLevel1>(pause); break;
      case 2: send_phase<EventGCPhasePauseLevel2>(pause); break;
      case 3: send_phase<EventGCPhasePauseLevel3>(pause); break;
      default: /* Ignore sending this phase */ break;
    }
  }
};

void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
  PhaseSender phase_reporter(_shared_gc_info.gc_id());

  TimePartitionPhasesIterator iter(time_partitions);
  while (iter.has_next()) {
    GCPhase* phase = iter.next();
    phase->accept(&phase_reporter);
  }
}