/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#if INCLUDE_JFR
#include "jfr/jfrEvents.hpp"
#endif
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "runtime/os.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#endif

// All GC dependencies against the trace framework are contained within this file.
41 42 typedef uintptr_t TraceAddress; 43 44 void GCTracer::send_garbage_collection_event() const { 45 #if INCLUDE_JFR 46 EventGarbageCollection event(UNTIMED); 47 if (event.should_commit()) { 48 event.set_gcId(_shared_gc_info.gc_id().id()); 49 event.set_name(_shared_gc_info.name()); 50 event.set_cause((u2) _shared_gc_info.cause()); 51 event.set_sumOfPauses(_shared_gc_info.sum_of_pauses()); 52 event.set_longestPause(_shared_gc_info.longest_pause()); 53 event.set_starttime(_shared_gc_info.start_timestamp()); 54 event.set_endtime(_shared_gc_info.end_timestamp()); 55 event.commit(); 56 } 57 #endif 58 } 59 60 void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const { 61 #if INCLUDE_JFR 62 EventGCReferenceStatistics e; 63 if (e.should_commit()) { 64 e.set_gcId(_shared_gc_info.gc_id().id()); 65 e.set_type((u1)type); 66 e.set_count(count); 67 e.commit(); 68 } 69 #endif 70 } 71 72 void GCTracer::send_metaspace_chunk_free_list_summary(GCWhen::Type when, Metaspace::MetadataType mdtype, 73 const MetaspaceChunkFreeListSummary& summary) const { 74 #if INCLUDE_JFR 75 EventMetaspaceChunkFreeListSummary e; 76 if (e.should_commit()) { 77 e.set_gcId(_shared_gc_info.gc_id().id()); 78 e.set_when(when); 79 e.set_metadataType(mdtype); 80 81 e.set_specializedChunks(summary.num_specialized_chunks()); 82 e.set_specializedChunksTotalSize(summary.specialized_chunks_size_in_bytes()); 83 84 e.set_smallChunks(summary.num_small_chunks()); 85 e.set_smallChunksTotalSize(summary.small_chunks_size_in_bytes()); 86 87 e.set_mediumChunks(summary.num_medium_chunks()); 88 e.set_mediumChunksTotalSize(summary.medium_chunks_size_in_bytes()); 89 90 e.set_humongousChunks(summary.num_humongous_chunks()); 91 e.set_humongousChunksTotalSize(summary.humongous_chunks_size_in_bytes()); 92 93 e.commit(); 94 } 95 #endif 96 } 97 98 void ParallelOldTracer::send_parallel_old_event() const { 99 #if INCLUDE_JFR 100 EventParallelOldGarbageCollection e(UNTIMED); 101 if (e.should_commit()) { 102 
e.set_gcId(_shared_gc_info.gc_id().id()); 103 e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix()); 104 e.set_starttime(_shared_gc_info.start_timestamp()); 105 e.set_endtime(_shared_gc_info.end_timestamp()); 106 e.commit(); 107 } 108 #endif 109 } 110 111 void YoungGCTracer::send_young_gc_event() const { 112 #if INCLUDE_JFR 113 EventYoungGarbageCollection e(UNTIMED); 114 if (e.should_commit()) { 115 e.set_gcId(_shared_gc_info.gc_id().id()); 116 e.set_tenuringThreshold(_tenuring_threshold); 117 e.set_starttime(_shared_gc_info.start_timestamp()); 118 e.set_endtime(_shared_gc_info.end_timestamp()); 119 e.commit(); 120 } 121 #endif 122 } 123 124 bool YoungGCTracer::should_send_promotion_in_new_plab_event() const { 125 #if INCLUDE_JFR 126 return EventPromoteObjectInNewPLAB::is_enabled(); 127 #else 128 return false; 129 #endif 130 } 131 132 bool YoungGCTracer::should_send_promotion_outside_plab_event() const { 133 #if INCLUDE_JFR 134 return EventPromoteObjectOutsidePLAB::is_enabled(); 135 #else 136 return false; 137 #endif 138 } 139 140 void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size, 141 uint age, bool tenured, 142 size_t plab_size) const { 143 #if INCLUDE_JFR 144 EventPromoteObjectInNewPLAB event; 145 if (event.should_commit()) { 146 event.set_gcId(_shared_gc_info.gc_id().id()); 147 event.set_objectClass(klass); 148 event.set_objectSize(obj_size); 149 event.set_tenured(tenured); 150 event.set_tenuringAge(age); 151 event.set_plabSize(plab_size); 152 event.commit(); 153 } 154 #endif 155 } 156 157 void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size, 158 uint age, bool tenured) const { 159 #if INCLUDE_JFR 160 EventPromoteObjectOutsidePLAB event; 161 if (event.should_commit()) { 162 event.set_gcId(_shared_gc_info.gc_id().id()); 163 event.set_objectClass(klass); 164 event.set_objectSize(obj_size); 165 event.set_tenured(tenured); 166 event.set_tenuringAge(age); 167 event.commit(); 168 } 169 
#endif 170 } 171 172 void OldGCTracer::send_old_gc_event() const { 173 #if INCLUDE_JFR 174 EventOldGarbageCollection e(UNTIMED); 175 if (e.should_commit()) { 176 e.set_gcId(_shared_gc_info.gc_id().id()); 177 e.set_starttime(_shared_gc_info.start_timestamp()); 178 e.set_endtime(_shared_gc_info.end_timestamp()); 179 e.commit(); 180 } 181 #endif 182 } 183 184 #if INCLUDE_JFR 185 static JfrStructCopyFailed to_struct(const CopyFailedInfo& cf_info) { 186 JfrStructCopyFailed failed_info; 187 failed_info.set_objectCount(cf_info.failed_count()); 188 failed_info.set_firstSize(cf_info.first_size()); 189 failed_info.set_smallestSize(cf_info.smallest_size()); 190 failed_info.set_totalSize(cf_info.total_size()); 191 return failed_info; 192 } 193 #endif 194 195 void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const { 196 #if INCLUDE_JFR 197 EventPromotionFailed e; 198 if (e.should_commit()) { 199 e.set_gcId(_shared_gc_info.gc_id().id()); 200 e.set_promotionFailed(to_struct(pf_info)); 201 e.set_thread(pf_info.thread()->thread_id()); 202 e.commit(); 203 } 204 #endif 205 } 206 207 // Common to CMS and G1 208 void OldGCTracer::send_concurrent_mode_failure_event() { 209 #if INCLUDE_JFR 210 EventConcurrentModeFailure e; 211 if (e.should_commit()) { 212 e.set_gcId(_shared_gc_info.gc_id().id()); 213 e.commit(); 214 } 215 #endif 216 } 217 218 #if INCLUDE_ALL_GCS 219 void G1NewTracer::send_g1_young_gc_event() { 220 #if INCLUDE_JFR 221 EventG1GarbageCollection e(UNTIMED); 222 if (e.should_commit()) { 223 e.set_gcId(_shared_gc_info.gc_id().id()); 224 e.set_type(_g1_young_gc_info.type()); 225 e.set_starttime(_shared_gc_info.start_timestamp()); 226 e.set_endtime(_shared_gc_info.end_timestamp()); 227 e.commit(); 228 } 229 #endif 230 } 231 232 void G1MMUTracer::send_g1_mmu_event(double time_slice_ms, double gc_time_ms, double max_time_ms) { 233 #if INCLUDE_JFR 234 EventG1MMU e; 235 if (e.should_commit()) { 236 e.set_gcId(GCId::peek().id()); 237 
e.set_timeSlice((s8)time_slice_ms); 238 e.set_gcTime((s8)gc_time_ms); 239 e.set_pauseTarget((s8)max_time_ms); 240 e.commit(); 241 } 242 #endif 243 } 244 245 void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) { 246 #if INCLUDE_JFR 247 EventEvacuationInformation e; 248 if (e.should_commit()) { 249 e.set_gcId(_shared_gc_info.gc_id().id()); 250 e.set_cSetRegions(info->collectionset_regions()); 251 e.set_cSetUsedBefore(info->collectionset_used_before()); 252 e.set_cSetUsedAfter(info->collectionset_used_after()); 253 e.set_allocationRegions(info->allocation_regions()); 254 e.set_allocationRegionsUsedBefore(info->alloc_regions_used_before()); 255 e.set_allocationRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied()); 256 e.set_bytesCopied(info->bytes_copied()); 257 e.set_regionsFreed(info->regions_freed()); 258 e.commit(); 259 } 260 #endif 261 } 262 263 void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const { 264 #if INCLUDE_JFR 265 EventEvacuationFailed e; 266 if (e.should_commit()) { 267 e.set_gcId(_shared_gc_info.gc_id().id()); 268 e.set_evacuationFailed(to_struct(ef_info)); 269 e.commit(); 270 } 271 #endif 272 } 273 274 // XXX 275 //static JfrStructG1EvacuationStatistics 276 //create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) { 277 // JfrStructG1EvacuationStatistics s; 278 // s.set_gcId(gcid); 279 // s.set_allocated(summary.allocated() * HeapWordSize); 280 // s.set_wasted(summary.wasted() * HeapWordSize); 281 // s.set_used(summary.used() * HeapWordSize); 282 // s.set_undoWaste(summary.undo_wasted() * HeapWordSize); 283 // s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize); 284 // s.set_regionsRefilled(summary.regions_filled()); 285 // s.set_directAllocated(summary.direct_allocated() * HeapWordSize); 286 // s.set_failureUsed(summary.failure_used() * HeapWordSize); 287 // s.set_failureWaste(summary.failure_waste() * HeapWordSize); 288 // return s; 289 //} 290 // 291 //void 
//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
//  EventG1EvacuationYoungStatistics surv_evt;
//  if (surv_evt.should_commit()) {
//    surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
//    surv_evt.commit();
//  }
//}
//
//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
//  EventG1EvacuationOldStatistics old_evt;
//  if (old_evt.should_commit()) {
//    old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
//    old_evt.commit();
//  }
//}
//
//void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
//                                             size_t target_occupancy,
//                                             size_t current_occupancy,
//                                             size_t last_allocation_size,
//                                             double last_allocation_duration,
//                                             double last_marking_length) {
//  EventG1BasicIHOP evt;
//  if (evt.should_commit()) {
//    evt.set_gcId(_shared_gc_info.gc_id().id());
//    evt.set_threshold(threshold);
//    evt.set_targetOccupancy(target_occupancy);
//    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
//    evt.set_currentOccupancy(current_occupancy);
//    evt.set_recentMutatorAllocationSize(last_allocation_size);
//    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
//    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
//    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
//    evt.commit();
//  }
//}
//
//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
//                                                size_t internal_target_occupancy,
//                                                size_t current_occupancy,
//                                                size_t additional_buffer_size,
//                                                double predicted_allocation_rate,
//                                                double predicted_marking_length,
//                                                bool prediction_active) {
//  EventG1AdaptiveIHOP evt;
//  if (evt.should_commit()) {
//    evt.set_gcId(_shared_gc_info.gc_id().id());
//    evt.set_threshold(threshold);
//    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
//    evt.set_ihopTargetOccupancy(internal_target_occupancy);
//    evt.set_currentOccupancy(current_occupancy);
//    evt.set_additionalBufferSize(additional_buffer_size);
//    evt.set_predictedAllocationRate(predicted_allocation_rate);
//    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
//    evt.set_predictionActive(prediction_active);
//    evt.commit();
//  }
//}

#endif // INCLUDE_ALL_GCS

#if INCLUDE_JFR
// Converts a virtual-space summary (start/committed/reserved bounds) into the
// JFR struct embedded in heap-summary events. Addresses are widened through
// TraceAddress (uintptr_t) for the event payload.
static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
  JfrStructVirtualSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_committedEnd((TraceAddress)summary.committed_end());
  space.set_committedSize(summary.committed_size());
  space.set_reservedEnd((TraceAddress)summary.reserved_end());
  space.set_reservedSize(summary.reserved_size());
  return space;
}

// Converts an object-space summary (start/end/used/size) into the JFR struct
// embedded in heap-summary events.
static JfrStructObjectSpace to_struct(const SpaceSummary& summary) {
  JfrStructObjectSpace space;
  space.set_start((TraceAddress)summary.start());
  space.set_end((TraceAddress)summary.end());
  space.set_used(summary.used());
  space.set_size(summary.size());
  return space;
}
#endif
GCHeapSummaryEventSender : public GCHeapSummaryVisitor { 374 GCId _gc_id; 375 GCWhen::Type _when; 376 public: 377 GCHeapSummaryEventSender(GCId gc_id, GCWhen::Type when) : _gc_id(gc_id), _when(when) {} 378 379 void visit(const GCHeapSummary* heap_summary) const { 380 #if INCLUDE_JFR 381 const VirtualSpaceSummary& heap_space = heap_summary->heap(); 382 383 EventGCHeapSummary e; 384 if (e.should_commit()) { 385 e.set_gcId(_gc_id.id()); 386 e.set_when((u1)_when); 387 e.set_heapSpace(to_struct(heap_space)); 388 e.set_heapUsed(heap_summary->used()); 389 e.commit(); 390 } 391 #endif 392 } 393 394 void visit(const G1HeapSummary* g1_heap_summary) const { 395 #if INCLUDE_JFR 396 visit((GCHeapSummary*)g1_heap_summary); 397 398 EventG1HeapSummary e; 399 if (e.should_commit()) { 400 e.set_gcId(_gc_id.id()); 401 e.set_when((u1)_when); 402 e.set_edenUsedSize(g1_heap_summary->edenUsed()); 403 e.set_edenTotalSize(g1_heap_summary->edenCapacity()); 404 e.set_survivorUsedSize(g1_heap_summary->survivorUsed()); 405 e.set_numberOfRegions(g1_heap_summary->numberOfRegions()); 406 e.commit(); 407 } 408 #endif 409 } 410 411 void visit(const PSHeapSummary* ps_heap_summary) const { 412 #if INCLUDE_JFR 413 visit((GCHeapSummary*)ps_heap_summary); 414 415 const VirtualSpaceSummary& old_summary = ps_heap_summary->old(); 416 const SpaceSummary& old_space = ps_heap_summary->old_space(); 417 const VirtualSpaceSummary& young_summary = ps_heap_summary->young(); 418 const SpaceSummary& eden_space = ps_heap_summary->eden(); 419 const SpaceSummary& from_space = ps_heap_summary->from(); 420 const SpaceSummary& to_space = ps_heap_summary->to(); 421 422 EventPSHeapSummary e; 423 if (e.should_commit()) { 424 e.set_gcId(_gc_id.id()); 425 e.set_when((u1)_when); 426 427 e.set_oldSpace(to_struct(ps_heap_summary->old())); 428 e.set_oldObjectSpace(to_struct(ps_heap_summary->old_space())); 429 e.set_youngSpace(to_struct(ps_heap_summary->young())); 430 e.set_edenSpace(to_struct(ps_heap_summary->eden())); 431 
e.set_fromSpace(to_struct(ps_heap_summary->from())); 432 e.set_toSpace(to_struct(ps_heap_summary->to())); 433 e.commit(); 434 } 435 #endif 436 } 437 }; 438 439 void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const { 440 GCHeapSummaryEventSender visitor(_shared_gc_info.gc_id(), when); 441 heap_summary.accept(&visitor); 442 } 443 444 #if INCLUDE_JFR 445 static JfrStructMetaspaceSizes to_struct(const MetaspaceSizes& sizes) { 446 JfrStructMetaspaceSizes meta_sizes; 447 448 meta_sizes.set_committed(sizes.committed()); 449 meta_sizes.set_used(sizes.used()); 450 meta_sizes.set_reserved(sizes.reserved()); 451 452 return meta_sizes; 453 } 454 #endif 455 456 void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const { 457 #if INCLUDE_JFR 458 EventMetaspaceSummary e; 459 if (e.should_commit()) { 460 e.set_gcId(_shared_gc_info.gc_id().id()); 461 e.set_when((u1) when); 462 e.set_gcThreshold(meta_space_summary.capacity_until_GC()); 463 e.set_metaspace(to_struct(meta_space_summary.meta_space())); 464 e.set_dataSpace(to_struct(meta_space_summary.data_space())); 465 e.set_classSpace(to_struct(meta_space_summary.class_space())); 466 e.commit(); 467 } 468 #endif 469 } 470 471 class PhaseSender : public PhaseVisitor { 472 GCId _gc_id; 473 public: 474 PhaseSender(GCId gc_id) : _gc_id(gc_id) {} 475 476 template<typename T> 477 void send_phase(GCPhase* phase) { 478 #if INCLUDE_JFR 479 T event(UNTIMED); 480 if (event.should_commit()) { 481 event.set_gcId(_gc_id.id()); 482 event.set_name(phase->name()); 483 event.set_starttime(phase->start()); 484 event.set_endtime(phase->end()); 485 event.commit(); 486 } 487 #endif 488 } 489 490 void visit(GCPhase* pause) { ShouldNotReachHere(); } 491 void visit(ConcurrentPhase* pause) { Unimplemented(); } 492 void visit(PausePhase* pause) { 493 assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types"); 494 495 #if INCLUDE_JFR 496 switch 
(pause->level()) { 497 case 0: send_phase<EventGCPhasePause>(pause); break; 498 case 1: send_phase<EventGCPhasePauseLevel1>(pause); break; 499 case 2: send_phase<EventGCPhasePauseLevel2>(pause); break; 500 case 3: send_phase<EventGCPhasePauseLevel3>(pause); break; 501 default: /* Ignore sending this phase */ break; 502 } 503 #endif 504 } 505 }; 506 507 void GCTracer::send_phase_events(TimePartitions* time_partitions) const { 508 PhaseSender phase_reporter(_shared_gc_info.gc_id()); 509 510 TimePartitionPhasesIterator iter(time_partitions); 511 while (iter.has_next()) { 512 GCPhase* phase = iter.next(); 513 phase->accept(&phase_reporter); 514 } 515 }