# HG changeset patch
# User mlarsson
# Date 1438332052 -7200
#      Fri Jul 31 10:40:52 2015 +0200
# Node ID 962cd43c1873071e09fb62bf6767b7aef94b5212
# Parent  37e693211debb5a4ac5eb68abef4c3e5b3287258
8065331: Add trace events for failed allocations

diff --git a/src/share/vm/gc/shared/allocTracer.cpp b/src/share/vm/gc/shared/allocTracer.cpp
--- a/src/share/vm/gc/shared/allocTracer.cpp
+++ b/src/share/vm/gc/shared/allocTracer.cpp
@@ -46,3 +46,12 @@
     event.commit();
   }
 }
+
+void AllocTracer::send_allocation_requiring_gc_event(size_t size, uint gcId) {
+  EventAllocationRequiringGC event;
+  if (event.should_commit()) {
+    event.set_gcId(gcId);
+    event.set_size(size);
+    event.commit();
+  }
+}
diff --git a/src/share/vm/gc/shared/allocTracer.hpp b/src/share/vm/gc/shared/allocTracer.hpp
--- a/src/share/vm/gc/shared/allocTracer.hpp
+++ b/src/share/vm/gc/shared/allocTracer.hpp
@@ -32,6 +32,7 @@
  public:
   static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
   static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size);
+  static void send_allocation_requiring_gc_event(size_t size, uint gcId);
 };
 
 #endif /* SHARE_VM_GC_SHARED_ALLOCTRACER_HPP */
diff --git a/src/share/vm/gc/shared/gcId.cpp b/src/share/vm/gc/shared/gcId.cpp
--- a/src/share/vm/gc/shared/gcId.cpp
+++ b/src/share/vm/gc/shared/gcId.cpp
@@ -38,6 +38,10 @@
   return _next_id++;
 }
 
+const uint GCId::peek() {
+  return _next_id;
+}
+
 const uint GCId::current() {
   assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id.");
   return current_raw();
diff --git a/src/share/vm/gc/shared/gcId.hpp b/src/share/vm/gc/shared/gcId.hpp
--- a/src/share/vm/gc/shared/gcId.hpp
+++ b/src/share/vm/gc/shared/gcId.hpp
@@ -39,6 +39,8 @@
   static const uint current();
   // Same as current() but can return undefined() if no GC id is currently active
   static const uint current_raw();
+  // Returns the next expected GCId.
+  static const uint peek();
   static const uint undefined() { return UNDEFINED; }
 };
 
diff --git a/src/share/vm/gc/shared/vmGCOperations.cpp b/src/share/vm/gc/shared/vmGCOperations.cpp
--- a/src/share/vm/gc/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc/shared/vmGCOperations.cpp
@@ -25,6 +25,8 @@
 #include "precompiled.hpp"
 #include "classfile/classLoader.hpp"
 #include "classfile/javaClasses.hpp"
+#include "gc/shared/allocTracer.hpp"
+#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
@@ -187,6 +189,18 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
 }
 
+VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
+                                                                 size_t size,
+                                                                 Metaspace::MetadataType mdtype,
+                                                                 uint gc_count_before,
+                                                                 uint full_gc_count_before,
+                                                                 GCCause::Cause gc_cause)
+    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
+      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
+  assert(_size != 0, "An allocation should always be requested with this operation.");
+  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
+}
+
 // Returns true iff concurrent GCs unloads metadata.
 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 #if INCLUDE_ALL_GCS
@@ -291,3 +305,11 @@
     set_gc_locked();
   }
 }
+
+VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
+    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
+  // Only report if operation was really caused by an allocation.
+  if (_word_size != 0) {
+    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
+  }
+}
diff --git a/src/share/vm/gc/shared/vmGCOperations.hpp b/src/share/vm/gc/shared/vmGCOperations.hpp
--- a/src/share/vm/gc/shared/vmGCOperations.hpp
+++ b/src/share/vm/gc/shared/vmGCOperations.hpp
@@ -166,8 +166,7 @@
   HeapWord* _result; // Allocation result (NULL if allocation failed)
 
  public:
-  VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
-    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {}
+  VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause);
 
   HeapWord* result() const {
     return _result;
@@ -220,10 +219,7 @@
                                   Metaspace::MetadataType mdtype,
                                   uint gc_count_before,
                                   uint full_gc_count_before,
-                                  GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
-      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
-  }
+                                  GCCause::Cause gc_cause);
 
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
diff --git a/src/share/vm/trace/trace.xml b/src/share/vm/trace/trace.xml
--- a/src/share/vm/trace/trace.xml
+++ b/src/share/vm/trace/trace.xml
@@ -1,6 +1,6 @@

diff --git a/src/share/vm/gc/shared/gcId.cpp b/src/share/vm/gc/shared/gcId.cpp
--- a/src/share/vm/gc/shared/gcId.cpp
+++ b/src/share/vm/gc/shared/gcId.cpp
@@ -38,10 +38,6 @@
   return _next_id++;
 }
 
-const uint GCId::peek() {
-  return _next_id;
-}
-
 const uint GCId::current() {
   assert(currentNamedthread()->gc_id() != undefined(), "Using undefined GC id.");
   return current_raw();
diff --git a/src/share/vm/gc/shared/gcId.hpp b/src/share/vm/gc/shared/gcId.hpp
--- a/src/share/vm/gc/shared/gcId.hpp
+++ b/src/share/vm/gc/shared/gcId.hpp
@@ -39,8 +39,6 @@
   static const uint current();
   // Same as current() but can return undefined() if no GC id is currently active
   static const uint current_raw();
-  // Returns the next expected GCId.
-  static const uint peek();
   static const uint undefined() { return UNDEFINED; }
 };
 
diff --git a/src/share/vm/gc/shared/vmGCOperations.cpp b/src/share/vm/gc/shared/vmGCOperations.cpp
--- a/src/share/vm/gc/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc/shared/vmGCOperations.cpp
@@ -26,7 +26,6 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/javaClasses.hpp"
 #include "gc/shared/allocTracer.hpp"
-#include "gc/shared/gcId.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
@@ -198,7 +197,7 @@
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
   assert(_size != 0, "An allocation should always be requested with this operation.");
-  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
+  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize);
 }
 
 // Returns true iff concurrent GCs unloads metadata.
@@ -310,6 +309,6 @@
     : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {
   // Only report if operation was really caused by an allocation.
   if (_word_size != 0) {
-    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
+    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize);
   }
 }
diff --git a/src/share/vm/trace/trace.xml b/src/share/vm/trace/trace.xml
--- a/src/share/vm/trace/trace.xml
+++ b/src/share/vm/trace/trace.xml
@@ -457,7 +457,6 @@
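
As a side note on the pattern the patch relies on: a trace event object is constructed, should_commit() is checked before any field is filled in, and commit() publishes the event. The stand-alone C++ sketch below only illustrates that should_commit()/set_*/commit() calling sequence; StubEventAllocationRequiringGC, its printf body, and the values in main() are hypothetical stand-ins (the real EventAllocationRequiringGC class is generated from trace.xml), so this is an illustration of the flow, not HotSpot code.

// Minimal, stand-alone sketch of the should_commit()/set_*/commit() pattern
// used by AllocTracer::send_allocation_requiring_gc_event in the patch.
// StubEventAllocationRequiringGC is a hypothetical stand-in for the
// trace.xml-generated EventAllocationRequiringGC class.
#include <cstddef>
#include <cstdio>

class StubEventAllocationRequiringGC {
 public:
  // In HotSpot this checks whether the event is enabled in the recording.
  bool should_commit() const { return _enabled; }
  void set_gcId(unsigned int gc_id) { _gc_id = gc_id; }
  void set_size(size_t size)        { _size = size; }
  // In HotSpot this writes the event into the trace buffer.
  void commit() const {
    std::printf("AllocationRequiringGC: gcId=%u size=%zu\n", _gc_id, _size);
  }
  static bool _enabled;
 private:
  unsigned int _gc_id = 0;
  size_t _size = 0;
};

bool StubEventAllocationRequiringGC::_enabled = true;

// Mirrors AllocTracer::send_allocation_requiring_gc_event(size, gcId)
// from the first revision of the patch.
static void send_allocation_requiring_gc_event(size_t size, unsigned int gc_id) {
  StubEventAllocationRequiringGC event;
  if (event.should_commit()) {   // cheap check before doing any work
    event.set_gcId(gc_id);
    event.set_size(size);
    event.commit();
  }
}

int main() {
  const size_t word_size = 128;              // hypothetical failed request, in words
  const size_t heap_word_size = sizeof(void*);
  // Only report if the operation was really caused by an allocation,
  // as VM_CollectForAllocation does in the patch.
  if (word_size != 0) {
    send_allocation_requiring_gc_event(word_size * heap_word_size, /*gc_id=*/42);
  }
  return 0;
}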