src/share/vm/gc/shared/gcTrace.cpp

  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/copyFailedInfo.hpp"
  27 #include "gc/shared/gcHeapSummary.hpp"
  28 #include "gc/shared/gcId.hpp"
  29 #include "gc/shared/gcTimer.hpp"
  30 #include "gc/shared/gcTrace.hpp"
  31 #include "gc/shared/objectCountEventSender.hpp"
  32 #include "gc/shared/referenceProcessorStats.hpp"
  33 #include "memory/heapInspection.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "runtime/os.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 #include "utilities/macros.hpp"
  38 #include "utilities/ticks.inline.hpp"
  39 #if INCLUDE_ALL_GCS
  40 #include "gc/g1/evacuationInfo.hpp"
  41 #endif
  42 
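// A tracer's _shared_gc_info holds a defined GCId only between report_gc_start()
// and report_gc_end(); the asserts below catch missing or doubled start/end calls.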
  43 #define assert_unset_gc_id() assert(_shared_gc_info.gc_id().is_undefined(), "GC already started?")
  44 #define assert_set_gc_id() assert(!_shared_gc_info.gc_id().is_undefined(), "GC not started?")
  45 
  46 void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {
  47   assert_unset_gc_id();
  48 
  49   GCId gc_id = GCId::create();
  50   _shared_gc_info.set_gc_id(gc_id);
  51   _shared_gc_info.set_cause(cause);
  52   _shared_gc_info.set_start_timestamp(timestamp);
  53 }
  54 
  55 void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {
  56   assert_unset_gc_id();
  57 
  58   report_gc_start_impl(cause, timestamp);
  59 }
  60 
  61 bool GCTracer::has_reported_gc_start() const {
  62   return !_shared_gc_info.gc_id().is_undefined();
  63 }
  64 
  65 void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
  66   assert_set_gc_id();
  67 
  68   _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
  69   _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
  70   _shared_gc_info.set_end_timestamp(timestamp);
  71 
  72   send_phase_events(time_partitions);
  73   send_garbage_collection_event();
  74 }
  75 
  76 void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {
  77   assert_set_gc_id();
  78 
  79   report_gc_end_impl(timestamp, time_partitions);
  80 
  81   _shared_gc_info.set_gc_id(GCId::undefined());
  82 }
  83 
  84 void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
  85   assert_set_gc_id();
  86 
  87   send_reference_stats_event(REF_SOFT, rps.soft_count());
  88   send_reference_stats_event(REF_WEAK, rps.weak_count());
  89   send_reference_stats_event(REF_FINAL, rps.final_count());
  90   send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
  91 }
  92 
  93 #if INCLUDE_SERVICES
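// Heap-inspection closure: emits an object-count event for each klass whose live
// instances amount to at least ObjectCountCutOffPercent percent of the inspected
// heap; the flag is expressed as a percentage, hence the division by 100 below.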
  94 class ObjectCountEventSenderClosure : public KlassInfoClosure {
  95   const GCId _gc_id;
  96   const double _size_threshold_percentage;
  97   const size_t _total_size_in_words;
  98   const Ticks _timestamp;
  99 
 100  public:
 101   ObjectCountEventSenderClosure(GCId gc_id, size_t total_size_in_words, const Ticks& timestamp) :
 102     _gc_id(gc_id),
 103     _size_threshold_percentage(ObjectCountCutOffPercent / 100),
 104     _total_size_in_words(total_size_in_words),
 105     _timestamp(timestamp)
 106   {}
 107 
 108   virtual void do_cinfo(KlassInfoEntry* entry) {
 109     if (should_send_event(entry)) {
 110       ObjectCountEventSender::send(entry, _gc_id, _timestamp);
 111     }
 112   }
 113 
 114  private:
 115   bool should_send_event(const KlassInfoEntry* entry) const {
 116     double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
 117     return percentage_of_heap >= _size_threshold_percentage;
 118   }
 119 };
 120 
 121 void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
 122   assert_set_gc_id();
 123   assert(is_alive_cl != NULL, "Must supply function to check liveness");
 124 
 125   if (ObjectCountEventSender::should_send_event()) {
 126     ResourceMark rm;
 127 
 128     KlassInfoTable cit(false);
 129     if (!cit.allocation_failed()) {
 130       HeapInspection hi(false, false, false, NULL);
 131       hi.populate_table(&cit, is_alive_cl);
 132       ObjectCountEventSenderClosure event_sender(_shared_gc_info.gc_id(), cit.size_of_instances_in_words(), Ticks::now());
 133       cit.iterate(&event_sender);
 134     }
 135   }
 136 }
 137 #endif // INCLUDE_SERVICES
 138 
 139 void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
 140   assert_set_gc_id();
 141 
 142   send_gc_heap_summary_event(when, heap_summary);
 143 }
 144 
 145 void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {
 146   assert_set_gc_id();
 147 
 148   send_meta_space_summary_event(when, summary);
 149 
 150   send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
 151   if (UseCompressedClassPointers) {
 152     send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary());
 153   }
 154 }
 155 
 156 void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
 157   assert_set_gc_id();
 158   assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");
 159 
 160   GCTracer::report_gc_end_impl(timestamp, time_partitions);
 161   send_young_gc_event();
 162 
 163   _tenuring_threshold = UNSET_TENURING_THRESHOLD;
 164 }
 165 
 166 void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {
 167   assert_set_gc_id();
 168 
 169   send_promotion_failed_event(pf_info);
 170 }
 171 
 172 void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) {
 173   _tenuring_threshold = tenuring_threshold;
 174 }
 175 
 176 bool YoungGCTracer::should_report_promotion_events() const {
 177   return should_report_promotion_in_new_plab_event() ||
 178           should_report_promotion_outside_plab_event();
 179 }
 180 
 181 bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
 182   return should_send_promotion_in_new_plab_event();
 183 }
 184 
 185 bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
 186   return should_send_promotion_outside_plab_event();
 187 }
 188 
 189 void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
 190                                                        uint age, bool tenured,
 191                                                        size_t plab_size) const {
 192   assert_set_gc_id();
 193   send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
 194 }
 195 
 196 void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
 197                                                         uint age, bool tenured) const {
 198   assert_set_gc_id();
 199   send_promotion_outside_plab_event(klass, obj_size, age, tenured);
 200 }
 201 
 202 void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
 203   assert_set_gc_id();
 204 
 205   GCTracer::report_gc_end_impl(timestamp, time_partitions);
 206   send_old_gc_event();
 207 }
 208 
 209 void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
 210   assert_set_gc_id();
 211 
 212   OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
 213   send_parallel_old_event();
 214 }
 215 
 216 void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
 217   assert_set_gc_id();
 218 
 219   _parallel_old_gc_info.report_dense_prefix(dense_prefix);
 220 }
 221 
 222 void OldGCTracer::report_concurrent_mode_failure() {
 223   assert_set_gc_id();
 224 
 225   send_concurrent_mode_failure_event();
 226 }
 227 
 228 #if INCLUDE_ALL_GCS
 229 void G1MMUTracer::report_mmu(const GCId& gcId, double timeSlice, double gcTime, double maxTime) {
 230   assert(!gcId.is_undefined(), "Undefined GC id");
 231 
 232   send_g1_mmu_event(gcId, timeSlice, gcTime, maxTime);
 233 }
 234 
 235 void G1NewTracer::report_yc_type(G1YCType type) {
 236   assert_set_gc_id();
 237 
 238   _g1_young_gc_info.set_type(type);
 239 }
 240 
 241 void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
 242   assert_set_gc_id();
 243 
 244   YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
 245   send_g1_young_gc_event();
 246 }
 247 
 248 void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
 249   assert_set_gc_id();
 250 
 251   send_evacuation_info_event(info);
 252 }
 253 
 254 void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
 255   assert_set_gc_id();
 256 
 257   send_evacuation_failed_event(ef_info);
 258   ef_info.reset();
 259 }
 260 
 261 void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const {
 262   assert_set_gc_id();
 263 
 264   send_young_evacuation_statistics(young_summary);
 265   send_old_evacuation_statistics(old_summary);
 266 }
 267 
 268 #endif


  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/copyFailedInfo.hpp"
  27 #include "gc/shared/gcHeapSummary.hpp"
  28 #include "gc/shared/gcId.hpp"
  29 #include "gc/shared/gcTimer.hpp"
  30 #include "gc/shared/gcTrace.hpp"
  31 #include "gc/shared/objectCountEventSender.hpp"
  32 #include "gc/shared/referenceProcessorStats.hpp"
  33 #include "memory/heapInspection.hpp"
  34 #include "memory/resourceArea.hpp"
  35 #include "runtime/os.hpp"
  36 #include "utilities/globalDefinitions.hpp"
  37 #include "utilities/macros.hpp"
  38 #include "utilities/ticks.inline.hpp"
  39 #if INCLUDE_ALL_GCS
  40 #include "gc/g1/evacuationInfo.hpp"
  41 #endif
  42 



  43 void GCTracer::report_gc_start_impl(GCCause::Cause cause, const Ticks& timestamp) {




  44   _shared_gc_info.set_cause(cause);
  45   _shared_gc_info.set_start_timestamp(timestamp);
  46 }
  47 
  48 void GCTracer::report_gc_start(GCCause::Cause cause, const Ticks& timestamp) {


  49   report_gc_start_impl(cause, timestamp);
  50 }
  51 




  52 void GCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {


  53   _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
  54   _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
  55   _shared_gc_info.set_end_timestamp(timestamp);
  56 
  57   send_phase_events(time_partitions);
  58   send_garbage_collection_event();
  59 }
  60 
  61 void GCTracer::report_gc_end(const Ticks& timestamp, TimePartitions* time_partitions) {


  62   report_gc_end_impl(timestamp, time_partitions);


  63 }
  64 
  65 void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {


  66   send_reference_stats_event(REF_SOFT, rps.soft_count());
  67   send_reference_stats_event(REF_WEAK, rps.weak_count());
  68   send_reference_stats_event(REF_FINAL, rps.final_count());
  69   send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
  70 }
  71 
  72 #if INCLUDE_SERVICES
  73 class ObjectCountEventSenderClosure : public KlassInfoClosure {

  74   const double _size_threshold_percentage;
  75   const size_t _total_size_in_words;
  76   const Ticks _timestamp;
  77 
  78  public:
  79   ObjectCountEventSenderClosure(size_t total_size_in_words, const Ticks& timestamp) :

  80     _size_threshold_percentage(ObjectCountCutOffPercent / 100),
  81     _total_size_in_words(total_size_in_words),
  82     _timestamp(timestamp)
  83   {}
  84 
  85   virtual void do_cinfo(KlassInfoEntry* entry) {
  86     if (should_send_event(entry)) {
  87       ObjectCountEventSender::send(entry, _timestamp);
  88     }
  89   }
  90 
  91  private:
  92   bool should_send_event(const KlassInfoEntry* entry) const {
  93     double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
  94     return percentage_of_heap >= _size_threshold_percentage;
  95   }
  96 };
  97 
  98 void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {

  99   assert(is_alive_cl != NULL, "Must supply function to check liveness");
 100 
 101   if (ObjectCountEventSender::should_send_event()) {
 102     ResourceMark rm;
 103 
 104     KlassInfoTable cit(false);
 105     if (!cit.allocation_failed()) {
 106       HeapInspection hi(false, false, false, NULL);
 107       hi.populate_table(&cit, is_alive_cl);
 108       ObjectCountEventSenderClosure event_sender(cit.size_of_instances_in_words(), Ticks::now());
 109       cit.iterate(&event_sender);
 110     }
 111   }
 112 }
 113 #endif // INCLUDE_SERVICES
 114 
 115 void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary) const {


 116   send_gc_heap_summary_event(when, heap_summary);
 117 }
 118 
 119 void GCTracer::report_metaspace_summary(GCWhen::Type when, const MetaspaceSummary& summary) const {


 120   send_meta_space_summary_event(when, summary);
 121 
 122   send_metaspace_chunk_free_list_summary(when, Metaspace::NonClassType, summary.metaspace_chunk_free_list_summary());
 123   if (UseCompressedClassPointers) {
 124     send_metaspace_chunk_free_list_summary(when, Metaspace::ClassType, summary.class_chunk_free_list_summary());
 125   }
 126 }
 127 
 128 void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {

 129   assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");
 130 
 131   GCTracer::report_gc_end_impl(timestamp, time_partitions);
 132   send_young_gc_event();
 133 
 134   _tenuring_threshold = UNSET_TENURING_THRESHOLD;
 135 }
 136 
 137 void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {


 138   send_promotion_failed_event(pf_info);
 139 }
 140 
 141 void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) {
 142   _tenuring_threshold = tenuring_threshold;
 143 }
 144 
 145 bool YoungGCTracer::should_report_promotion_events() const {
 146   return should_report_promotion_in_new_plab_event() ||
 147           should_report_promotion_outside_plab_event();
 148 }
 149 
 150 bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
 151   return should_send_promotion_in_new_plab_event();
 152 }
 153 
 154 bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
 155   return should_send_promotion_outside_plab_event();
 156 }
 157 
 158 void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
 159                                                        uint age, bool tenured,
 160                                                        size_t plab_size) const {

 161   send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
 162 }
 163 
 164 void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
 165                                                         uint age, bool tenured) const {

 166   send_promotion_outside_plab_event(klass, obj_size, age, tenured);
 167 }
 168 
 169 void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {


 170   GCTracer::report_gc_end_impl(timestamp, time_partitions);
 171   send_old_gc_event();
 172 }
 173 
 174 void ParallelOldTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {


 175   OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
 176   send_parallel_old_event();
 177 }
 178 
 179 void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {


 180   _parallel_old_gc_info.report_dense_prefix(dense_prefix);
 181 }
 182 
 183 void OldGCTracer::report_concurrent_mode_failure() {


 184   send_concurrent_mode_failure_event();
 185 }
 186 
 187 #if INCLUDE_ALL_GCS
 188 void G1MMUTracer::report_mmu(double timeSlice, double gcTime, double maxTime) {
 189   send_g1_mmu_event(timeSlice, gcTime, maxTime);


 190 }
 191 
 192 void G1NewTracer::report_yc_type(G1YCType type) {


 193   _g1_young_gc_info.set_type(type);
 194 }
 195 
 196 void G1NewTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {


 197   YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
 198   send_g1_young_gc_event();
 199 }
 200 
 201 void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {


 202   send_evacuation_info_event(info);
 203 }
 204 
 205 void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {


 206   send_evacuation_failed_event(ef_info);
 207   ef_info.reset();
 208 }
 209 
 210 void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summary, const G1EvacSummary& old_summary) const {


 211   send_young_evacuation_statistics(young_summary);
 212   send_old_evacuation_statistics(old_summary);
 213 }
 214 
 215 #endif
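
For context, a minimal sketch of how a collector is expected to drive the reporting API above, assuming it is compiled inside HotSpot alongside the headers included in this file. The function trace_full_gc_sketch and its summary arguments are hypothetical stand-ins; SerialOldTracer, STWGCTimer, GCWhen and GCCause are existing HotSpot types.

#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "utilities/ticks.inline.hpp"

// Hypothetical driver: brackets one stop-the-world collection with the report_*
// calls defined in gcTrace.cpp. Phase timings are assumed to be registered on
// the STWGCTimer by the collection itself.
static void trace_full_gc_sketch(SerialOldTracer* tracer, STWGCTimer* timer,
                                 const GCHeapSummary& before_summary,
                                 const GCHeapSummary& after_summary) {
  timer->register_gc_start();
  tracer->report_gc_start(GCCause::_java_lang_system_gc, timer->gc_start());
  tracer->report_gc_heap_summary(GCWhen::BeforeGC, before_summary);

  // ... the collection runs here, recording phases on the timer ...

  tracer->report_gc_heap_summary(GCWhen::AfterGC, after_summary);
  timer->register_gc_end();
  tracer->report_gc_end(timer->gc_end(), timer->time_partitions());
}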