--- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp 2013-03-25 14:37:10.028253174 +0100
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp 2013-03-25 14:37:09.960253175 +0100
@@ -5060,6 +5060,7 @@
if (UseAdaptiveSizePolicy) {
size_policy()->checkpoint_roots_final_end(gch->gc_cause());
}
+ _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
}
// Parallel remark task
--- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2013-03-25 14:37:10.468253156 +0100
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp 2013-03-25 14:37:10.400253160 +0100
@@ -1202,6 +1202,9 @@
_remark_times.add((now - start) * 1000.0);
g1p->record_concurrent_mark_remark_end();
+
+ G1CMIsAliveClosure is_alive(g1h);
+ g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
}
// Base class of the closures that finalize and verify the
--- old/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp 2013-03-25 14:37:10.784253144 +0100
+++ new/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp 2013-03-25 14:37:10.712253148 +0100
@@ -206,6 +206,8 @@
G1CollectedHeap* g1h = G1CollectedHeap::heap();
gclog_or_tty->print_cr("]");
}
+
+ gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
}
class G1PrepareCompactClosure: public HeapRegionClosure {
--- old/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp 2013-03-25 14:37:11.048253135 +0100
+++ new/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp 2013-03-25 14:37:10.984253138 +0100
@@ -566,6 +566,7 @@
SymbolTable::unlink();
assert(_marking_stack.is_empty(), "stack should be empty by now");
+ _gc_tracer->report_object_count_after_gc(is_alive_closure());
}
--- old/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp 2013-03-25 14:37:11.308253125 +0100
+++ new/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp 2013-03-25 14:37:11.240253127 +0100
@@ -2443,6 +2443,7 @@
SymbolTable::unlink();
assert(cm->marking_stacks_empty(), "marking stacks should be empty");
+ _gc_tracer.report_object_count_after_gc(is_alive_closure());
}
// This should be moved to the shared markSweep code!
--- old/src/share/vm/gc_implementation/shared/gcTrace.cpp 2013-03-25 14:37:11.640253113 +0100
+++ new/src/share/vm/gc_implementation/shared/gcTrace.cpp 2013-03-25 14:37:11.576253114 +0100
@@ -27,6 +27,8 @@
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "memory/heapInspection.hpp"
+#include "memory/iterator.hpp"
#include "memory/referenceProcessorStats.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -85,6 +87,29 @@
send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
}
+// Closure handed to HeapInspection: for each per-class bucket in the
+// KlassInfoTable it forwards (klass, instance count, total size in bytes)
+// to the tracer as one ObjectCountAfterGC event. It is declared a friend of
+// GCTracer (see gcTrace.hpp) so it can reach the private send method.
+class ObjectCountEventSenderClosure : public KlassInfoClosure {
+ GCTracer* _gc_tracer;
+ public:
+ ObjectCountEventSenderClosure(GCTracer* gc_tracer) : _gc_tracer(gc_tracer) {}
+ private:
+ void do_cinfo(KlassInfoEntry* entry) {
+ // entry->words() counts heap words; convert to bytes for the event payload.
+ _gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(),
+ entry->words() * BytesPerWord);
+ }
+};
+
+// Walks the heap and emits one ObjectCountAfterGC event per class, counting
+// only objects for which is_alive_cl answers true. Checks the tracing backend
+// first so that the (expensive) heap walk is skipped entirely when the event
+// is disabled.
+void GCTracer::report_object_count_after_gc(BoolObjectClosure *is_alive_cl) {
+ if (should_send_object_count_after_gc_event()) {
+ ResourceMark rm;
+
+ // If the counting table could not be allocated, silently emit nothing
+ // rather than report a partial/garbage histogram.
+ KlassInfoTable cit(HeapInspection::start_of_perm_gen());
+ if (!cit.allocation_failed()) {
+ ObjectCountEventSenderClosure event_sender(this);
+ // NOTE(review): third argument presumably suppresses printing/prologue
+ // behavior of instance_inspection — confirm against heapInspection.hpp.
+ HeapInspection::instance_inspection(&cit, &event_sender, false, is_alive_cl);
+ }
+ }
+}
+
void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const PermGenSummary& perm_gen_summary) const {
assert_set_gc_id();
--- old/src/share/vm/gc_implementation/shared/gcTrace.hpp 2013-03-25 14:37:11.900253103 +0100
+++ new/src/share/vm/gc_implementation/shared/gcTrace.hpp 2013-03-25 14:37:11.832253106 +0100
@@ -42,6 +42,7 @@
class PSHeapSummary;
class ReferenceProcessorStats;
class TimePartitions;
+class BoolObjectClosure;
class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
static const jlong UNSET_TIMESTAMP = -1;
@@ -109,6 +110,7 @@
#endif // SERIALGC
class GCTracer : public ResourceObj {
+ friend class ObjectCountEventSenderClosure;
protected:
SharedGCInfo _shared_gc_info;
@@ -117,6 +119,7 @@
void report_gc_end(jlong timestamp, TimePartitions* time_partitions);
void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const PermGenSummary& perm_gen_summary) const;
void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
+ void report_object_count_after_gc(BoolObjectClosure* object_filter);
bool has_reported_gc_start() const;
@@ -131,6 +134,8 @@
void send_perm_gen_summary_event(GCWhen::Type when, const PermGenSummary& perm_gen_summary) const;
void send_reference_stats_event(ReferenceType type, size_t count) const;
void send_phase_events(TimePartitions* time_partitions) const;
+ void send_object_count_after_gc_event(klassOop klass, jlong count, julong total_size) const;
+ bool should_send_object_count_after_gc_event() const;
};
class YoungGCTracer : public GCTracer {
--- old/src/share/vm/gc_implementation/shared/gcTraceSend.cpp 2013-03-25 14:37:12.168253093 +0100
+++ new/src/share/vm/gc_implementation/shared/gcTraceSend.cpp 2013-03-25 14:37:12.104253095 +0100
@@ -28,7 +28,11 @@
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/promotionFailedInfo.hpp"
+#include "memory/heapInspection.hpp"
+#include "memory/iterator.hpp"
+#include "trace/traceBackend.hpp"
#include "trace/tracing.hpp"
+#include "utilities/globalDefinitions.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1YCTypes.hpp"
#endif
@@ -105,6 +109,25 @@
}
}
+// Emits a single ObjectCountAfterGC trace event for one class, tagged with
+// the id of the GC this tracer is currently reporting.
+void GCTracer::send_object_count_after_gc_event(klassOop klass, jlong count, julong total_size) const {
+ EventObjectCountAfterGC e;
+ if (e.should_commit()) {
+ e.set_gcId(_shared_gc_info.id());
+ e.set_class(klass);
+ e.set_count(count);
+ e.set_totalSize(total_size);
+ e.commit();
+ }
+}
+
+// Cheap up-front check so callers can avoid the whole heap inspection when
+// the event is disabled, or when tracing support is compiled out.
+bool GCTracer::should_send_object_count_after_gc_event() const {
+#if INCLUDE_TRACE
+ return Tracing::enabled(EventObjectCountAfterGC::eventId);
+#else
+ return false;
+#endif
+}
+
#ifndef SERIALGC
void G1NewTracer::send_g1_young_gc_event() {
EventGCG1GarbageCollection e(UNTIMED);
--- old/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2013-03-25 14:37:12.428253082 +0100
+++ new/src/share/vm/gc_implementation/shared/vmGCOperations.cpp 2013-03-25 14:37:12.360253086 +0100
@@ -144,27 +144,33 @@
return false;
}
-void VM_GC_HeapInspection::doit() {
- HandleMark hm;
+// Makes the heap parsable and, unless the GC locker is held, runs a full
+// collection with cause _heap_inspection. Returns true iff the collection
+// was actually attempted (factored out of doit() so subclasses can reuse it).
+bool VM_GC_HeapInspection::collect() {
CollectedHeap* ch = Universe::heap();
ch->ensure_parsability(false); // must happen, even if collection does
// not happen (e.g. due to GC_locker)
+
+ if (GC_locker::is_active()) {
+ return false;
+ }
+ ch->collect_as_vm_thread(GCCause::_heap_inspection);
+ return true;
+}
+
+// VM-thread entry point: optionally force a pre-dump GC (so the histogram
+// shows mostly live objects), then print the class histogram.
+void VM_GC_HeapInspection::doit() {
+ HandleMark hm;
if (_full_gc) {
- // The collection attempt below would be skipped anyway if
- // the gc locker is held. The following dump may then be a tad
- // misleading to someone expecting only live objects to show
- // up in the dump (see CR 6944195). Just issue a suitable warning
- // in that case and do not attempt to do a collection.
- // The latter is a subtle point, because even a failed attempt
- // to GC will, in fact, induce one in the future, which we
- // probably want to avoid in this case because the GC that we may
- // be about to attempt holds value for us only
- // if it happens now and not if it happens in the eventual
- // future.
- if (GC_locker::is_active()) {
+ if (!collect()) {
+ // The collection attempt was skipped because the gc locker is held.
+ // The following dump may then be a tad misleading to someone expecting
+ // only live objects to show up in the dump (see CR 6944195). Just issue
+ // a suitable warning in that case and do not attempt to do a collection.
+ // The latter is a subtle point, because even a failed attempt
+ // to GC will, in fact, induce one in the future, which we
+ // probably want to avoid in this case because the GC that we may
+ // be about to attempt holds value for us only
+ // if it happens now and not if it happens in the eventual
+ // future.
warning("GC locker is held; pre-dump GC was skipped");
- } else {
- ch->collect_as_vm_thread(GCCause::_heap_inspection);
}
}
HeapInspection::heap_inspection(_out, _need_prologue /* need_prologue */);
--- old/src/share/vm/gc_implementation/shared/vmGCOperations.hpp 2013-03-25 14:37:12.700253073 +0100
+++ new/src/share/vm/gc_implementation/shared/vmGCOperations.hpp 2013-03-25 14:37:12.632253075 +0100
@@ -150,6 +150,8 @@
virtual bool skip_operation() const;
virtual bool doit_prologue();
virtual void doit();
+ protected:
+ bool collect();
};
--- old/src/share/vm/memory/genMarkSweep.cpp 2013-03-25 14:37:12.964253063 +0100
+++ new/src/share/vm/memory/genMarkSweep.cpp 2013-03-25 14:37:12.900253064 +0100
@@ -318,6 +318,8 @@
SymbolTable::unlink();
assert(_marking_stack.is_empty(), "stack should be empty by now");
+
+ gc_tracer()->report_object_count_after_gc(&is_alive);
}
--- old/src/share/vm/trace/trace.xml 2013-03-25 14:37:13.252253051 +0100
+++ new/src/share/vm/trace/trace.xml 2013-03-25 14:37:13.172253055 +0100
@@ -189,6 +189,13 @@
+    <event id="ObjectCountAfterGC" path="vm/gc/detailed/object_count_after_gc"
+           label="Object Count after GC" is_instant="true">
+      <value type="ULONG" field="gcId" label="GC ID" relation="GC_ID"/>
+      <value type="CLASS" field="class" label="Class"/>
+      <value type="LONG" field="count" label="Count"/>
+      <value type="BYTES64" field="totalSize" label="Total Size"/>
+    </event>