# HG changeset patch
# User mlarsson
# Date 1425304253 -3600
#      Mon Mar 02 14:50:53 2015 +0100
# Node ID 55f38d4ba31d8eee96805008657ce0b0fadaa4a7
# Parent  2ad148b553072386e8aaaadc5d27161e3c9ea6cd
imported patch event1
* * *
imported patch event2

diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -756,7 +756,8 @@
   uint dummy_gc_count_before;
   uint dummy_gclocker_retry_count = 0;
-  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
+  uint gc_attempts = 1;
+  return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count, &gc_attempts);
 }
 
 HeapWord*
@@ -765,21 +766,21 @@
   assert_heap_not_locked_and_not_at_safepoint();
 
   // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
+  for (uint try_count = 1, gclocker_retry_count = 0, gc_attempt = 1; /* we'll return */; try_count += 1) {
     uint gc_count_before;
 
     HeapWord* result = NULL;
     if (!is_humongous(word_size)) {
-      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
+      result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count, &gc_attempt);
     } else {
-      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
+      result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count, &gc_attempt);
     }
     if (result != NULL) {
       return result;
     }
 
     // Create the garbage collection operation...
-    VM_G1CollectForAllocation op(gc_count_before, word_size);
+    VM_G1CollectForAllocation op(gc_count_before, word_size, gc_attempt++);
     op.set_allocation_context(AllocationContext::current());
 
     // ...and get the VM thread to execute it.
@@ -819,7 +820,8 @@
 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                    AllocationContext_t context,
                                                    uint* gc_count_before_ret,
-                                                   uint* gclocker_retry_count_ret) {
+                                                   uint* gclocker_retry_count_ret,
+                                                   uint* gc_attempt) {
   // Make sure you read the note in attempt_allocation_humongous().
   assert_heap_not_locked_and_not_at_safepoint();
 
@@ -880,7 +882,8 @@
     if (should_try_gc) {
       bool succeeded;
       result = do_collection_pause(word_size, gc_count_before, &succeeded,
-                                   GCCause::_g1_inc_collection_pause);
+                                   GCCause::_g1_inc_collection_pause, *gc_attempt);
+      *gc_attempt += 1;
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -935,7 +938,8 @@
 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                         uint* gc_count_before_ret,
-                                                        uint* gclocker_retry_count_ret) {
+                                                        uint* gclocker_retry_count_ret,
+                                                        uint* gc_attempt) {
   // The structure of this method has a lot of similarities to
   // attempt_allocation_slow(). The reason these two were not merged
   // into a single one is that such a method would require several "if
@@ -1006,7 +1010,8 @@
       bool succeeded;
       result = do_collection_pause(word_size, gc_count_before, &succeeded,
-                                   GCCause::_g1_humongous_allocation);
+                                   GCCause::_g1_humongous_allocation, *gc_attempt);
+      *gc_attempt += 1;
       if (result != NULL) {
         assert(succeeded, "only way to get back a non-NULL result");
         return result;
@@ -3401,14 +3406,16 @@
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                uint gc_count_before,
                                                bool* succeeded,
-                                               GCCause::Cause gc_cause) {
+                                               GCCause::Cause gc_cause,
+                                               uint gc_attempt) {
   assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
 
   VM_G1IncCollectionPause op(gc_count_before,
                              word_size,
                              false, /* should_initiate_conc_mark */
                              g1_policy()->max_pause_time_ms(),
-                             gc_cause);
+                             gc_cause,
+                             gc_attempt);
   op.set_allocation_context(AllocationContext::current());
   VMThread::execute(&op);
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -499,7 +499,8 @@
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t word_size,
                                       uint* gc_count_before_ret,
-                                      uint* gclocker_retry_count_ret);
+                                      uint* gclocker_retry_count_ret,
+                                      uint* gc_attempt);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
@@ -507,13 +508,15 @@
   HeapWord* attempt_allocation_slow(size_t word_size,
                                     AllocationContext_t context,
                                     uint* gc_count_before_ret,
-                                    uint* gclocker_retry_count_ret);
+                                    uint* gclocker_retry_count_ret,
+                                    uint* gc_attempt);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.
   HeapWord* attempt_allocation_humongous(size_t word_size,
                                          uint* gc_count_before_ret,
-                                         uint* gclocker_retry_count_ret);
+                                         uint* gclocker_retry_count_ret,
+                                         uint* gc_attempt);
 
   // Allocation attempt that should be called during safepoints (e.g.,
   // at the end of a successful GC). expect_null_mutator_alloc_region
@@ -746,7 +749,8 @@
   HeapWord* do_collection_pause(size_t word_size,
                                 uint gc_count_before,
                                 bool* succeeded,
-                                GCCause::Cause gc_cause);
+                                GCCause::Cause gc_cause,
+                                uint gc_attempt);
 
   // The guts of the incremental collection pause, executed by the vm
   // thread. It returns false if it is unable to do the collection due
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp
@@ -132,7 +132,8 @@
 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                      uint* gc_count_before_ret,
-                                                     uint* gclocker_retry_count_ret) {
+                                                     uint* gclocker_retry_count_ret,
+                                                     uint* gc_attempt) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!is_humongous(word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -144,7 +145,8 @@
     result = attempt_allocation_slow(word_size,
                                      context,
                                      gc_count_before_ret,
-                                     gclocker_retry_count_ret);
+                                     gclocker_retry_count_ret,
+                                     gc_attempt);
   }
   assert_heap_not_locked();
   if (result != NULL) {
diff --git a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -35,9 +35,11 @@
 #include "runtime/interfaceSupport.hpp"
 
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
-                                                     size_t word_size)
+                                                     size_t word_size,
+                                                     uint gc_attempt)
   : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
-                                   GCCause::_allocation_failure) {
+                                   GCCause::_allocation_failure,
+                                   gc_attempt) {
   guarantee(word_size != 0, "An allocation should always be requested with this operation.");
 }
 
@@ -60,8 +62,9 @@
                                       size_t word_size,
                                       bool should_initiate_conc_mark,
                                       double target_pause_time_ms,
-                                      GCCause::Cause gc_cause)
-  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
+                                      GCCause::Cause gc_cause,
+                                      uint gc_attempt)
+  : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause, gc_attempt),
     _should_initiate_conc_mark(should_initiate_conc_mark),
     _target_pause_time_ms(target_pause_time_ms),
     _should_retry_gc(false),
diff --git a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.hpp
@@ -44,8 +44,9 @@
  public:
   VM_G1OperationWithAllocRequest(uint gc_count_before,
                                  size_t word_size,
-                                 GCCause::Cause gc_cause)
-    : VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
+                                 GCCause::Cause gc_cause,
+                                 uint gc_attempt)
+    : VM_CollectForAllocation(word_size, gc_count_before, gc_cause, gc_attempt),
       _pause_succeeded(false) {}
   bool pause_succeeded() { return _pause_succeeded; }
   void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
@@ -68,7 +69,8 @@
 class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
  public:
   VM_G1CollectForAllocation(uint gc_count_before,
-                            size_t word_size);
+                            size_t word_size,
+                            uint gc_attempt);
   virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
   virtual void doit();
   virtual const char* name() const {
@@ -87,7 +89,8 @@
                           size_t word_size,
                           bool should_initiate_conc_mark,
                           double target_pause_time_ms,
-                          GCCause::Cause gc_cause);
+                          GCCause::Cause gc_cause,
+                          uint gc_attempt = 0);
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual bool doit_prologue();
   virtual void doit();
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -261,6 +261,7 @@
   uint loop_count = 0;
   uint gc_count = 0;
   uint gclocker_stalled_count = 0;
+  uint gc_attempt = 1;
 
   while (result == NULL) {
     // We don't want to have multiple collections for a single filled generation.
@@ -319,7 +320,7 @@
 
       if (result == NULL) {
         // Generate a VM operation
-        VM_ParallelGCFailedAllocation op(size, gc_count);
+        VM_ParallelGCFailedAllocation op(size, gc_count, gc_attempt++);
         VMThread::execute(&op);
 
         // Did the VM operation execute? If so, return the result directly.
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -33,8 +33,9 @@
 // The following methods are used by the parallel scavenge collector
 VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
-                                                             uint gc_count) :
-  VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure) {
+                                                             uint gc_count,
+                                                             uint gc_attempt) :
+  VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure, gc_attempt) {
   assert(word_size != 0, "An allocation should always be requested with this operation.");
 }
 
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.hpp
@@ -31,7 +31,7 @@
 
 class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
  public:
-  VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count);
+  VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count, uint gc_attempt);
 
   virtual VMOp_Type type() const {
     return VMOp_ParallelGCFailedAllocation;
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -200,6 +200,24 @@
   gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
 }
 
+void VM_CollectForMetadataAllocation::doit_epilogue() {
+  AllocTracer::send_collect_for_allocation_event(_size * HeapWordSize, _gcid, _gc_attempt);
+  VM_GC_Operation::doit_epilogue();
+}
+
+VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
+                                                                 size_t size,
+                                                                 Metaspace::MetadataType mdtype,
+                                                                 uint gc_count_before,
+                                                                 uint full_gc_count_before,
+                                                                 GCCause::Cause gc_cause,
+                                                                 uint gc_attempt)
+  : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
+    _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL), _gc_attempt(gc_attempt), _gcid(GCId::peek()) {
+  assert(_size != 0, "An allocation should always be requested with this operation.");
+  AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, _gcid, _gc_attempt);
+}
+
 // Returns true iff concurrent GCs unloads metadata.
 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
 #if INCLUDE_ALL_GCS
@@ -304,3 +322,19 @@
     set_gc_locked();
   }
 }
+
+VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause, uint gc_attempt)
+  : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size), _gc_attempt(gc_attempt), _gcid(GCId::peek()) {
+  // Only report if operation was really caused by an allocation.
+  if (_word_size != 0) {
+    AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, _gcid, _gc_attempt);
+  }
+}
+
+void VM_CollectForAllocation::doit_epilogue() {
+  // Only report if operation was caused by an allocation.
+  if (_word_size != 0) {
+    AllocTracer::send_collect_for_allocation_event(_word_size * HeapWordSize, _gcid, _gc_attempt);
+  }
+  VM_GC_Operation::doit_epilogue();
+}
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_SHARED_VMGCOPERATIONS_HPP
 
 #include "gc_interface/collectedHeap.hpp"
+#include "gc_implementation/shared/gcId.hpp"
 #include "memory/heapInspection.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/jniHandles.hpp"
@@ -162,12 +163,17 @@
 class VM_CollectForAllocation : public VM_GC_Operation {
  protected:
-  size_t    _word_size; // Size of object to be allocated (in number of words)
-  HeapWord* _result;    // Allocation result (NULL if allocation failed)
+  size_t    _word_size;  // Size of object to be allocated (in number of words)
+  HeapWord* _result;     // Allocation result (NULL if allocation failed)
+  uint      _gc_attempt; // Collection attempt for this allocation.
+  GCId      _gcid;       // Predicted GCId for this operation.
 
  public:
-  VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
-    : VM_GC_Operation(gc_count_before, cause), _result(NULL), _word_size(word_size) {}
+  VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause, uint gc_attempt);
+
+  // The epilogue is run by the requesting thread after and if the collection happened.
+  // It is extended here to trace collections performed due to failed allocations.
+  void doit_epilogue();
 
   HeapWord* result() const {
     return _result;
@@ -180,8 +186,9 @@
  public:
   VM_GenCollectForAllocation(size_t word_size,
                              bool tlab,
-                             uint gc_count_before)
-    : VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
+                             uint gc_count_before,
+                             uint gc_attempt)
+    : VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure, gc_attempt),
       _tlab(tlab) {
     assert(word_size != 0, "An allocation should always be requested with this operation.");
   }
@@ -210,20 +217,21 @@
 class VM_CollectForMetadataAllocation: public VM_GC_Operation {
  private:
   MetaWord*                _result;
-  size_t                   _size; // size of object to be allocated
+  size_t                   _size;       // size of object to be allocated
   Metaspace::MetadataType  _mdtype;
   ClassLoaderData*         _loader_data;
+  uint                     _gc_attempt; // Collection attempt for this allocation.
+  GCId                     _gcid;       // Predicted GCId for this operation.
  public:
   VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
                                   size_t size,
                                   Metaspace::MetadataType mdtype,
                                   uint gc_count_before,
                                   uint full_gc_count_before,
-                                  GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
-      _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
-  }
+                                  GCCause::Cause gc_cause,
+                                  uint gc_attempt);
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
+  void doit_epilogue();
   MetaWord* result() const { return _result; }
   bool initiate_concurrent_GC();
diff --git a/src/share/vm/gc_interface/allocTracer.cpp b/src/share/vm/gc_interface/allocTracer.cpp
--- a/src/share/vm/gc_interface/allocTracer.cpp
+++ b/src/share/vm/gc_interface/allocTracer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/shared/gcId.hpp"
#include "gc_interface/allocTracer.hpp"
 #include "trace/tracing.hpp"
 #include "runtime/handles.hpp"
@@ -46,3 +47,23 @@
     event.commit();
   }
 }
+
+void AllocTracer::send_allocation_requiring_gc_event(size_t size, const GCId& gcid, uint gc_attempt) {
+  EventAllocationRequiringGC event;
+  if (event.should_commit()) {
+    event.set_gcId(gcid.id());
+    event.set_size(size);
+    event.set_gcAttempt(gc_attempt);
+    event.commit();
+  }
+}
+
+void AllocTracer::send_collect_for_allocation_event(size_t size, const GCId& gcid, uint gc_attempt) {
+  EventCollectForAllocation ev;
+  if (ev.should_commit()) {
+    ev.set_gcId(gcid.id());
+    ev.set_size(size);
+    ev.set_gcAttempt(gc_attempt);
+    ev.commit();
+  }
+}
diff --git a/src/share/vm/gc_interface/allocTracer.hpp b/src/share/vm/gc_interface/allocTracer.hpp
--- a/src/share/vm/gc_interface/allocTracer.hpp
+++ b/src/share/vm/gc_interface/allocTracer.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,8 @@
  public:
   static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
   static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size);
+  static void send_collect_for_allocation_event(size_t size, const GCId& gcid, uint gc_attempt);
+  static void send_allocation_requiring_gc_event(size_t size, const GCId& gcid, uint gc_attempt);
 };
 
 #endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */
diff --git a/src/share/vm/memory/collectorPolicy.cpp b/src/share/vm/memory/collectorPolicy.cpp
--- a/src/share/vm/memory/collectorPolicy.cpp
+++ b/src/share/vm/memory/collectorPolicy.cpp
@@ -597,7 +597,7 @@
   HeapWord* result = NULL;
 
   // Loop until the allocation is satisfied, or unsatisfied after GC.
-  for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
+  for (uint try_count = 1, gclocker_stalled_count = 0, gc_attempt = 1; /* return or throw */; try_count += 1) {
     HandleMark hm; // Discard any handles allocated in each iteration.
 
     // First allocation attempt is lock-free.
@@ -671,7 +671,7 @@
       gc_count_before = Universe::heap()->total_collections();
     }
 
-    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
+    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before, gc_attempt++);
     VMThread::execute(&op);
 
     if (op.prologue_succeeded()) {
       result = op.result();
@@ -816,6 +816,7 @@
   uint loop_count = 0;
   uint gc_count = 0;
   uint full_gc_count = 0;
+  uint gc_attempt = 1;
 
   assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
 
@@ -865,7 +866,8 @@
                                            mdtype,
                                            gc_count,
                                            full_gc_count,
-                                           GCCause::_metadata_GC_threshold);
+                                           GCCause::_metadata_GC_threshold,
+                                           gc_attempt++);
     VMThread::execute(&op);
 
     // If GC was locked out, try again. Check before checking success because the
diff --git a/src/share/vm/trace/trace.xml b/src/share/vm/trace/trace.xml
--- a/src/share/vm/trace/trace.xml
+++ b/src/share/vm/trace/trace.xml
@@ -1,6 +1,6 @@
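
Note on the shape of this change: every mutator allocation path now carries a
gc_attempt counter that starts at 1 and is incremented each time a collection
is scheduled on behalf of the same allocation request. The counter rides along
on the VM operation so the new AllocationRequiringGC and CollectForAllocation
trace events can report which retry the GC belongs to. The standalone C++
sketch below mirrors that retry loop in miniature; it is illustrative only,
and MockHeap plus every name in it are invented stand-ins, not HotSpot code.

// Minimal sketch of the allocate-then-collect retry loop this patch
// instruments. Compiles standalone with any C++11 compiler.
#include <cstddef>
#include <cstdio>

struct MockHeap {
  int free_regions;                     // starts exhausted; a GC frees space
  MockHeap() : free_regions(0) {}

  void* allocate(std::size_t word_size) {
    (void)word_size;                    // capacity accounting elided in mock
    return free_regions > 0 ? static_cast<void*>(&free_regions) : nullptr;
  }

  // Stand-in for executing a VM_CollectForAllocation operation; in the real
  // patch, gc_attempt is passed to the operation and emitted via
  // AllocTracer::send_allocation_requiring_gc_event and
  // send_collect_for_allocation_event.
  void collect_for_allocation(unsigned gc_attempt) {
    std::printf("GC for allocation, attempt %u\n", gc_attempt);
    if (gc_attempt >= 2) {
      free_regions = 1;                 // pretend the second GC frees memory
    }
  }
};

// Mirrors the shape of G1CollectedHeap::mem_allocate after the patch:
// gc_attempt starts at 1 and is bumped once per scheduled collection.
void* mem_allocate(MockHeap& heap, std::size_t word_size) {
  for (unsigned try_count = 1, gc_attempt = 1; ; try_count += 1) {
    void* result = heap.allocate(word_size);
    if (result != nullptr) {
      return result;
    }
    heap.collect_for_allocation(gc_attempt++);
  }
}

int main() {
  MockHeap heap;
  mem_allocate(heap, 16);               // prints attempts 1 and 2, then succeeds
  return 0;
}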