# HG changeset patch # User Jean Christophe Beyler # Date 1506111844 25200 # Fri Sep 22 13:24:04 2017 -0700 # Node ID 7d88c86b55de405655cfaa65ab7b1724c56758da # Parent c8ac05bbe47771b3dafa2e7fc9a95d86d68d7c07 [mq]: heapz8 diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk --- a/make/test/JtregNativeHotspot.gmk +++ b/make/test/JtregNativeHotspot.gmk @@ -63,6 +63,7 @@ $(TOPDIR)/test/hotspot/jtreg/compiler/calls \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetNamedModule \ + $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/IsModifiableModule \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/AddModuleReads \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/AddModuleExportsAndOpens \ diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -77,6 +77,7 @@ #include "oops/oop.inline.hpp" #include "prims/resolvedMethodTable.hpp" #include "runtime/atomic.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/init.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/vmThread.hpp" @@ -4138,6 +4139,13 @@ g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0); } +void G1CollectedHeap::process_heap_monitoring() { + log_develop_trace(gc, ref)("HeapSampling [other] : heap monitoring processing"); + G1STWIsAliveClosure is_alive(this); + G1KeepAliveClosure keep_alive(this); + HeapMonitoring::weak_oops_do(&is_alive, &keep_alive); +} + void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) { // Any reference objects, in the collection set, that were 'discovered' // by the CM ref processor should have already been copied (either by @@ -4369,6 +4377,7 @@ } else { 
ref_processor_stw()->verify_no_references_recorded(); process_weak_jni_handles(); + process_heap_monitoring(); } if (G1StringDedup::is_enabled()) { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -304,6 +304,7 @@ void trace_heap(GCWhen::Type when, const GCTracer* tracer); + void process_heap_monitoring(); void process_weak_jni_handles(); // These are macros so that, if the assert fires, we get the correct diff --git a/src/hotspot/share/gc/g1/g1MarkSweep.cpp b/src/hotspot/share/gc/g1/g1MarkSweep.cpp --- a/src/hotspot/share/gc/g1/g1MarkSweep.cpp +++ b/src/hotspot/share/gc/g1/g1MarkSweep.cpp @@ -48,6 +48,7 @@ #include "prims/jvmtiExport.hpp" #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/synchronizer.hpp" #include "runtime/thread.hpp" #include "runtime/vmThread.hpp" @@ -273,6 +274,7 @@ // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure); + HeapMonitoring::weak_oops_do(&GenMarkSweep::adjust_pointer_closure); if (G1StringDedup::is_enabled()) { G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure); diff --git a/src/hotspot/share/gc/parallel/psMarkSweep.cpp b/src/hotspot/share/gc/parallel/psMarkSweep.cpp --- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp +++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp @@ -50,6 +50,7 @@ #include "logging/log.hpp" #include "oops/oop.inline.hpp" #include "runtime/biasedLocking.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/safepoint.hpp" #include "runtime/vmThread.hpp" #include "services/management.hpp" @@ -614,6 +615,7 @@ // have been cleared if they pointed to non-surviving objects.) 
// Global (weak) JNI handles JNIHandles::weak_oops_do(adjust_pointer_closure()); + HeapMonitoring::weak_oops_do(adjust_pointer_closure()); CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -60,6 +60,7 @@ #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/safepoint.hpp" #include "runtime/vmThread.hpp" #include "services/management.hpp" @@ -2176,6 +2177,7 @@ // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles JNIHandles::weak_oops_do(&oop_closure); + HeapMonitoring::weak_oops_do(&oop_closure); CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -38,6 +38,7 @@ #include "memory/resourceArea.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/init.hpp" #include "runtime/thread.inline.hpp" #include "services/heapDumper.hpp" @@ -296,7 +297,43 @@ } #endif +HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) { + // We can come here for three reasons: + // - We either really did fill the tlab. + // - We pretended to everyone we did and we want to sample. + // - Both of the above reasons are true at the same time. 
+ if (HeapMonitoring::enabled()) { + if (thread->tlab().should_sample()) { + // If we don't have an object yet, try to allocate it. + if (obj == NULL) { + // The tlab could still have space after this sample. + thread->tlab().set_back_actual_end(); + obj = thread->tlab().allocate(size); + } + + // Is the object allocated now? + // If not, this means we have to wait till a new TLAB, let the subsequent + // call to handle_heap_sampling pick the next sample. + if (obj != NULL) { + // Object is allocated, sample it now. + HeapMonitoring::object_alloc_do_sample(thread, + reinterpret_cast(obj), + size); + // Pick a next sample in this case, we allocated right. + thread->tlab().pick_next_sample(); + } + } + } + + return obj; +} + HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) { + HeapWord* obj = handle_heap_sampling(thread, NULL, size); + + if (obj != NULL) { + return obj; + } // Retain tlab and allocate object in shared space if // the amount free in the tlab is too large to discard. @@ -316,7 +353,7 @@ } // Allocate a new TLAB... - HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size); + obj = Universe::heap()->allocate_new_tlab(new_tlab_size); if (obj == NULL) { return NULL; } @@ -337,6 +374,7 @@ #endif // ASSERT } thread->tlab().fill(obj, obj + size, new_tlab_size); + handle_heap_sampling(thread, obj, size); return obj; } diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -144,6 +144,9 @@ inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size); static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size); + // Handle if needed heap sampling. 
+ static HeapWord* handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size); + // Allocate an uninitialized block of the given size, or returns NULL if // this is impossible. inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS); diff --git a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp @@ -158,6 +158,7 @@ AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize); + THREAD->tlab().handle_sample(THREAD, result, size); return result; } diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -49,6 +49,7 @@ #include "runtime/biasedLocking.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/java.hpp" #include "runtime/vmThread.hpp" #include "services/management.hpp" @@ -722,6 +723,7 @@ void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { JNIHandles::weak_oops_do(root_closure); + HeapMonitoring::weak_oops_do(root_closure); _young_gen->ref_processor()->weak_oops_do(root_closure); _old_gen->ref_processor()->weak_oops_do(root_closure); } diff --git a/src/hotspot/share/gc/shared/referenceProcessor.cpp b/src/hotspot/share/gc/shared/referenceProcessor.cpp --- a/src/hotspot/share/gc/shared/referenceProcessor.cpp +++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp @@ -35,6 +35,7 @@ #include "memory/allocation.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/java.hpp" #include "runtime/jniHandles.hpp" @@ -258,9 +259,17 @@ process_phaseJNI(is_alive, keep_alive, complete_gc); } + // Heap Monitoring 
references + size_t handled; + { + GCTraceTime(Debug, gc, ref) tt("Heap Monitoring Weak Reference", gc_timer); + handled = process_phaseHeapSampling(is_alive, keep_alive, complete_gc, task_executor); + } + phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000); log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs()); + log_develop_trace(gc, ref)("Heap Sampler Weak Reference handled: " SIZE_FORMAT, handled); return stats; } @@ -290,6 +299,22 @@ complete_gc->do_void(); } +size_t ReferenceProcessor::process_phaseHeapSampling( + BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor) { + size_t count = 0; + if (HeapMonitoring::enabled()) { + if (task_executor != NULL) { + task_executor->set_single_threaded_mode(); + } + count = HeapMonitoring::weak_oops_do(is_alive, keep_alive); + complete_gc->do_void(); + } + return count; +} + void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor, ReferenceProcessorPhaseTimes* phase_times) { // Enqueue references that are not made active again, and diff --git a/src/hotspot/share/gc/shared/referenceProcessor.hpp b/src/hotspot/share/gc/shared/referenceProcessor.hpp --- a/src/hotspot/share/gc/shared/referenceProcessor.hpp +++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp @@ -250,6 +250,11 @@ OopClosure* keep_alive, VoidClosure* complete_gc); + size_t process_phaseHeapSampling(BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor); + // Work methods used by the method process_discovered_reflist // Phase1: keep alive all those referents that are otherwise // dead but which must be kept alive by policy (and their closure). 
diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -29,6 +29,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" @@ -121,10 +122,13 @@ set_top(NULL); set_pf_top(NULL); set_end(NULL); + set_actual_end(NULL); + set_slow_path_end(NULL); } } assert(!(retire || ZeroTLAB) || - (start() == NULL && end() == NULL && top() == NULL), + (start() == NULL && end() == NULL && top() == NULL && + actual_end() == NULL && slow_path_end() == NULL), "TLAB must be reset"); } @@ -183,7 +187,10 @@ set_top(top); set_pf_top(top); set_end(end); + set_actual_end(end); + set_slow_path_end(end); invariants(); + _bytes_until_sample = 0; } void ThreadLocalAllocBuffer::initialize() { @@ -306,13 +313,82 @@ guarantee(p == top(), "end of last object must match end of space"); } +void ThreadLocalAllocBuffer::pick_next_sample() { + if (!HeapMonitoring::enabled()) { + return; + } + + if (bytes_until_sample() == 0) { + HeapMonitoring::pick_next_sample(bytes_until_sample_addr()); + } + + // Finally, fix up the sampling bytes left and _end. 
+ size_t heap_words_remaining = _end - _top; + size_t bytes_left = bytes_until_sample(); + size_t words_until_sample = bytes_left / HeapWordSize; + + if (heap_words_remaining > words_until_sample) { + HeapWord* new_end = _top + words_until_sample; + set_end(new_end); + set_slow_path_end(new_end); + set_bytes_until_sample(0); + } else { + bytes_left -= heap_words_remaining * HeapWordSize; + set_bytes_until_sample(bytes_left); + } + + log_trace(gc, tlab)("TLAB picked next sample: thread: " INTPTR_FORMAT " [id: %2d]" + " start: %p top: %p end: %p actual_end: %p slow_path_end: %p", + p2i(myThread()), myThread()->osthread()->thread_id(), + start(), top(), end(), + actual_end(), slow_path_end()); +} + Thread* ThreadLocalAllocBuffer::myThread() { return (Thread*)(((char *)this) + in_bytes(start_offset()) - in_bytes(Thread::tlab_start_offset())); } +void ThreadLocalAllocBuffer::set_back_actual_end() { + // Did a fast TLAB refill occur? + if (_slow_path_end != _end) { + // Fix up the actual end to be now the end of this TLAB. + _slow_path_end = _end; + _actual_end = _end; + } else { + _end = _actual_end; + } +} +void ThreadLocalAllocBuffer::handle_sample(Thread* thread, HeapWord* result, + size_t size) { + if (!HeapMonitoring::enabled()) { + return; + } + + set_bytes_until_sample(bytes_until_sample() - size); + + // Should we sample now? + set_back_actual_end(); + if (should_sample()) { + HeapMonitoring::object_alloc_do_sample(thread, + reinterpret_cast(result), + size); + } + pick_next_sample(); +} + +HeapWord* ThreadLocalAllocBuffer::hard_end() { + // Did a fast TLAB refill occur? + if (_slow_path_end != _end) { + // Fix up the actual end to be now the end of this TLAB. 
+ _slow_path_end = _end; + _actual_end = _end; + } + + return _actual_end + alignment_reserve(); +} GlobalTLABStats::GlobalTLABStats() : _allocating_threads_avg(TLABAllocationWeight) { diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp @@ -37,6 +37,13 @@ // It is thread-private at any time, but maybe multiplexed over // time across multiple threads. The park()/unpark() pair is // used to make it available for such multiplexing. +// +// Heap sampling is performed via the end/actual_end fields. +// actual_end contains the real end of the tlab allocation, +// whereas end can be set to an arbitrary spot in the tlab to +// trip the return and sample the allocation. +// slow_path_end is used to track if a fast tlab refill occured +// between slowpath calls. class ThreadLocalAllocBuffer: public CHeapObj { friend class VMStructs; friend class JVMCIVMStructs; @@ -44,10 +51,15 @@ HeapWord* _start; // address of TLAB HeapWord* _top; // address after last allocation HeapWord* _pf_top; // allocation prefetch watermark - HeapWord* _end; // allocation end (excluding alignment_reserve) + HeapWord* _end; // allocation end (can be the sampling end point or + // the actual TLAB end, excluding alignment_reserve) + HeapWord* _actual_end; // allocation actual_end (actual TLAB end, excluding alignment_reserve) + HeapWord* _slow_path_end; // remember the end in case a fast refill occurs. + size_t _desired_size; // desired size (including alignment_reserve) size_t _refill_waste_limit; // hold onto tlab if free() is larger than this size_t _allocated_before_last_gc; // total bytes allocated up until the last gc + size_t _bytes_until_sample; // bytes until sample. 
static size_t _max_size; // maximum size of any TLAB static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB @@ -66,17 +78,20 @@ void set_start(HeapWord* start) { _start = start; } void set_end(HeapWord* end) { _end = end; } + void set_actual_end(HeapWord* actual_end) { _actual_end = actual_end; } + void set_slow_path_end(HeapWord* slow_path_end) { _slow_path_end = slow_path_end; } void set_top(HeapWord* top) { _top = top; } void set_pf_top(HeapWord* pf_top) { _pf_top = pf_top; } void set_desired_size(size_t desired_size) { _desired_size = desired_size; } void set_refill_waste_limit(size_t waste) { _refill_waste_limit = waste; } + void set_bytes_until_sample(size_t bytes) { _bytes_until_sample = bytes; } size_t initial_refill_waste_limit() { return desired_size() / TLABRefillWasteFraction; } static int target_refills() { return _target_refills; } size_t initial_desired_size(); - size_t remaining() const { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } + size_t remaining() { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } // Make parsable and release it. 
void reset(); @@ -115,7 +130,10 @@ HeapWord* start() const { return _start; } HeapWord* end() const { return _end; } - HeapWord* hard_end() const { return _end + alignment_reserve(); } + HeapWord* slow_path_end() const { return _slow_path_end; } + HeapWord* actual_end() const { return _actual_end; } + + HeapWord* hard_end(); HeapWord* top() const { return _top; } HeapWord* pf_top() const { return _pf_top; } size_t desired_size() const { return _desired_size; } @@ -162,11 +180,19 @@ void fill(HeapWord* start, HeapWord* top, size_t new_size); void initialize(); + void pick_next_sample(); + void set_back_actual_end(); + void handle_sample(Thread* thread, HeapWord* result, size_t size); + size_t bytes_until_sample() { return _bytes_until_sample; } + size_t *bytes_until_sample_addr() { return &_bytes_until_sample; } + bool should_sample() { return bytes_until_sample() == 0; } + static size_t refill_waste_limit_increment() { return TLABWasteIncrement; } // Code generation support static ByteSize start_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _start); } static ByteSize end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _end ); } + static ByteSize actual_end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _actual_end ); } static ByteSize top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _top ); } static ByteSize pf_top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top ); } static ByteSize size_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); } diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ b/src/hotspot/share/prims/jvmti.xml @@ -10351,6 +10351,13 @@ See . + + + + Can sample the heap. + If this capability is enabled then the heap sampling methods can be called. + + @@ -11529,6 +11536,269 @@ + + + + + jvmtiFrameInfo + + Pointer to the call frames. + + + + The number of frames for the trace. 
+ + + + The size of the object allocation. + + + + The thread id number. + + + + + + + jvmtiStackTrace + + + The array with the various stack traces. + + + + + + + Number of traces pointed by the array . + + + + + + + + + The number of sampled allocations during the lifetime of the sampler. + For very long sampling, this number can overflow. + + + + + + + The number of samples already garbage collected. + For very long sampling, this number can overflow. + + + + + + + Accumulation of the sample rates chosen. + For very long sampling, this number can overflow. + + + + + + + The number of sample rates chosen. + For very long sampling, this number can overflow. + + + + + + + Accumulation of stack depths collected by the sampler. + For very long sampling, this number can overflow. + + + + + + Start Heap Sampling + + Start the heap sampler in the JVM. The function provides, via its argument, the sampling + rate requested and will fill internal data structures with heap allocation samples. The + samples are obtained via the , + , , + functions. + + Starting the heap sampler resets internal traces and counters. Therefore stopping the sampler + puts internal trace samples and counters on pause for post-processing. + + new + + + + + + + + The monitoring rate used for sampling. The sampler will use a statistical approach to + provide in average sampling every allocated bytes. + + + + + + The maximum storage used for the sampler. By default, the value is 200. + + + + + + is less than zero. + + + + + + Stop Heap Sampling + + Stop the heap sampler in the JVM. + Any sample obtained during sampling is still available via the , + , , + functions. + + Starting the heap sampler resets internal traces and counters. Therefore stopping the sampler + puts internal trace samples and counters on pause for post-processing. + + new + + + + + + + + + + + Get Live Traces + + Get Live Heap Sampled traces. 
The fields of the + structure are filled in with details of the specified sampled allocation. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns no traces. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Get Garbage Traces + + Get the recent garbage heap sampled traces. The fields of the + structure are filled in with details of the specified sampled allocation. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns no traces. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Get Frequent Garbage Traces + + Get the frequent garbage heap sampled traces. The fields of the + structure are filled in with details of the specified sampled allocation. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns no traces. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Release traces provided by the heap monitoring + + Release traces provided by any of the trace retrieval methods. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be released. + + + + + + + + + Get the heap sampling statistics + + Returns a to understand the heap sampling behavior and current + internal data storage status. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns a zeroed-out structure. + + new + + + + + + jvmtiHeapSamplingStats + + The structure to be filled with the heap sampler's statistics. 
+ + + + + + + + diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -46,6 +46,7 @@ #include "prims/jvmtiCodeBlobEvents.hpp" #include "prims/jvmtiExtensions.hpp" #include "prims/jvmtiGetLoadedClasses.hpp" +#include "prims/jvmtiHeapTransition.hpp" #include "prims/jvmtiImpl.hpp" #include "prims/jvmtiManageCapabilities.hpp" #include "prims/jvmtiRawMonitor.hpp" @@ -55,6 +56,7 @@ #include "prims/jvmtiUtil.hpp" #include "runtime/arguments.hpp" #include "runtime/deoptimization.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" @@ -1947,6 +1949,81 @@ return JVMTI_ERROR_NONE; } /* end IterateOverInstancesOfClass */ +// Start the sampler. +jvmtiError +JvmtiEnv::StartHeapSampling(jint monitoring_rate, jint max_storage) { + if (monitoring_rate < 0) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapThreadTransition htt(Thread::current()); + HeapMonitoring::initialize_profiling(monitoring_rate, max_storage); + return JVMTI_ERROR_NONE; +} /* end StartHeapSampling */ + +// Stop the sampler. +jvmtiError +JvmtiEnv::StopHeapSampling() { + HeapThreadTransition htt(Thread::current()); + HeapMonitoring::stop_profiling(); + return JVMTI_ERROR_NONE; +} /* end StopHeapSampling */ + +// Get the currently live sampled allocations. +jvmtiError +JvmtiEnv::GetLiveTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_live_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetLiveTraces */ + +// Get the currently live sampled allocations. 
+jvmtiError +JvmtiEnv::GetGarbageTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_garbage_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetGarbageTraces */ + +// Get the currently live sampled allocations. +jvmtiError +JvmtiEnv::GetFrequentGarbageTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_frequent_garbage_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetFrequentGarbageTraces */ + +// Release sampled traces. +jvmtiError +JvmtiEnv::ReleaseTraces(jvmtiStackTraces* stack_traces) { + if (stack_traces == NULL) { + return JVMTI_ERROR_NONE; + } + HeapMonitoring::release_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end ReleaseTraces */ + +// Get the heap sampling statistics. +jvmtiError +JvmtiEnv::GetHeapSamplingStats(jvmtiHeapSamplingStats* stats) { + if (stats == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + HeapMonitoring::get_sampling_statistics(stats); + return JVMTI_ERROR_NONE; +} /* end GetHeapSamplingStats */ // // Local Variable functions diff --git a/src/hotspot/share/prims/jvmtiHeapTransition.hpp b/src/hotspot/share/prims/jvmtiHeapTransition.hpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/prims/jvmtiHeapTransition.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP +#define SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP + +// A RAII class that handles transitions from the agent into the VM. +class HeapThreadTransition : StackObj { + private: + JavaThreadState _saved_state; + JavaThread *_jthread; + + public: + // Transitions this thread from the agent (thread_in_native) to the VM. + HeapThreadTransition(Thread *thread) { + if (thread->is_Java_thread()) { + _jthread = static_cast(thread); + _saved_state = _jthread->thread_state(); + if (_saved_state == _thread_in_native) { + ThreadStateTransition::transition_from_native(_jthread, _thread_in_vm); + } else { + ThreadStateTransition::transition(_jthread, + _saved_state, + _thread_in_vm); + } + } else { + _jthread = NULL; + _saved_state = _thread_new; + } + } + + // Transitions this thread back to the agent from the VM. 
+ ~HeapThreadTransition() { + if (_jthread != NULL) { + ThreadStateTransition::transition(_jthread, _thread_in_vm, _saved_state); + } + } +}; + +#endif // SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP diff --git a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp --- a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp +++ b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp @@ -157,6 +157,7 @@ jc.can_generate_field_modification_events = 1; jc.can_generate_field_access_events = 1; jc.can_generate_breakpoint_events = 1; + jc.can_sample_heap = 1; return jc; } @@ -423,6 +424,8 @@ log_trace(jvmti)("can_generate_frame_pop_events"); if (cap->can_generate_breakpoint_events) log_trace(jvmti)("can_generate_breakpoint_events"); + if (cap->can_sample_heap) + log_trace(jvmti)("can_sample_heap"); if (cap->can_suspend) log_trace(jvmti)("can_suspend"); if (cap->can_redefine_any_class ) diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -0,0 +1,722 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "prims/forte.hpp" +#include "runtime/heapMonitoring.hpp" + + +static const int MaxStackDepth = 64; + +// Internal data structure representing traces. +struct StackTraceData : CHeapObj { + jvmtiStackTrace *trace; + oop obj; + int references; + + StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {} + + StackTraceData() : trace(NULL), obj(NULL), references(0) {} + + // StackTraceDatas are shared around the board between various lists. So + // handle this by hand instead of having this in the destructor. There are + // cases where the struct is on the stack but holding heap data not to be + // freed. + static void free_data(StackTraceData *data) { + if (data->trace != NULL) { + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames); + FREE_C_HEAP_OBJ(data->trace); + } + delete data; + } +}; + +// Fixed size buffer for holding garbage traces. 
+class GarbageTracesBuffer : public CHeapObj { + public: + GarbageTracesBuffer(uint32_t size) : _size(size) { + _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*, + size, + mtInternal); + memset(_garbage_traces, 0, sizeof(StackTraceData*) * size); + } + + virtual ~GarbageTracesBuffer() { + FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces); + } + + StackTraceData** get_traces() const { + return _garbage_traces; + } + + bool store_trace(StackTraceData *trace) { + uint32_t index; + if (!select_replacement(&index)) { + return false; + } + + StackTraceData *old_data = _garbage_traces[index]; + + if (old_data != NULL) { + old_data->references--; + + if (old_data->references == 0) { + StackTraceData::free_data(old_data); + } + } + + trace->references++; + _garbage_traces[index] = trace; + return true; + } + + uint32_t size() const { + return _size; + } + + protected: + // Subclasses select the trace to replace. Returns false if no replacement + // is to happen, otherwise stores the index of the trace to replace in + // *index. + virtual bool select_replacement(uint32_t *index) = 0; + + const uint32_t _size; + + private: + // The current garbage traces. A fixed-size ring buffer. + StackTraceData **_garbage_traces; +}; + +// Keep statistical sample of traces over the lifetime of the server. +// When the buffer is full, replace a random entry with probability +// 1/samples_seen. This strategy tends towards preserving the most frequently +// occuring traces over time. 
+class FrequentGarbageTraces : public GarbageTracesBuffer { + public: + FrequentGarbageTraces(int size) + : GarbageTracesBuffer(size), + _garbage_traces_pos(0), + _samples_seen(0) { + } + + virtual ~FrequentGarbageTraces() { + } + + virtual bool select_replacement(uint32_t* index) { + ++_samples_seen; + + if (_garbage_traces_pos < _size) { + *index = _garbage_traces_pos++; + return true; + } + + uint64_t random_uint64 = + (static_cast(::random()) << 32) | ::random(); + + uint32_t random_index = random_uint64 % _samples_seen; + if (random_index < _size) { + *index = random_index; + return true; + } + + return false; + } + + private: + // The current position in the buffer as we initially fill it. + uint32_t _garbage_traces_pos; + + uint64_t _samples_seen; +}; + +// Store most recent garbage traces. +class MostRecentGarbageTraces : public GarbageTracesBuffer { + public: + MostRecentGarbageTraces(int size) + : GarbageTracesBuffer(size), + _garbage_traces_pos(0) { + } + + virtual ~MostRecentGarbageTraces() { + } + + virtual bool select_replacement(uint32_t* index) { + *index = _garbage_traces_pos; + + _garbage_traces_pos = + (_garbage_traces_pos + 1) % _size; + + return true; + } + + private: + // The current position in the buffer. + uint32_t _garbage_traces_pos; +}; + +// Each object that we profile is stored as trace with the thread_id. +class StackTraceStorage : public CHeapObj { + public: + // The function that gets called to add a trace to the list of + // traces we are maintaining. + void add_trace(jvmtiStackTrace *trace, oop o); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_all_stack_traces(jvmtiStackTraces *traces); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. 
+ void get_garbage_stack_traces(jvmtiStackTraces *traces); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces); + + // Executes whenever weak references are traversed. is_alive tells + // you if the given oop is still reachable and live. + size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); + + ~StackTraceStorage(); + StackTraceStorage(); + + static StackTraceStorage* storage() { + if (internal_storage == NULL) { + internal_storage = new StackTraceStorage(); + } + return internal_storage; + } + + static void reset_stack_trace_storage() { + delete internal_storage, internal_storage = NULL; + } + + bool is_initialized() { + return _initialized; + } + + const jvmtiHeapSamplingStats& get_heap_sampling_stats() const { + return _stats; + } + + // Static method to set the storage in place at initialization. + static void initialize_stack_trace_storage(int max_storage) { + reset_stack_trace_storage(); + StackTraceStorage *storage = StackTraceStorage::storage(); + storage->initialize_storage(max_storage); + } + + void accumulate_sample_rate(size_t rate) { + _stats.sample_rate_accumulation += rate; + _stats.sample_rate_count++; + } + + bool initialized() { return _initialized; } + volatile bool *initialized_address() { return &_initialized; } + + private: + // The traces currently sampled. + GrowableArray *_allocated_traces; + + // Recent garbage traces. + MostRecentGarbageTraces *_recent_garbage_traces; + + // Frequent garbage traces. + FrequentGarbageTraces *_frequent_garbage_traces; + + // Heap Sampling statistics. + jvmtiHeapSamplingStats _stats; + + // Maximum amount of storage provided by the JVMTI call initialize_profiling. 
+ int _max_storage; + + static StackTraceStorage* internal_storage; + volatile bool _initialized; + + // Support functions and classes for copying data to the external + // world. + class StackTraceDataCopier { + public: + virtual int size() const = 0; + virtual const StackTraceData *get(uint32_t i) const = 0; + }; + + class LiveStackTraceDataCopier : public StackTraceDataCopier { + public: + LiveStackTraceDataCopier(GrowableArray *data) : + _data(data) {} + int size() const { return _data ? _data->length() : 0; } + const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); } + + private: + GrowableArray *_data; + }; + + class GarbageStackTraceDataCopier : public StackTraceDataCopier { + public: + GarbageStackTraceDataCopier(StackTraceData **data, int size) : + _data(data), _size(size) {} + int size() const { return _size; } + const StackTraceData *get(uint32_t i) const { return _data[i]; } + + private: + StackTraceData **_data; + int _size; + }; + + // Instance initialization. + void initialize_storage(int max_storage); + + // Copies from StackTraceData to jvmtiStackTrace. + bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from); + + // Creates a deep copy of the list of StackTraceData. 
+ void copy_stack_traces(const StackTraceDataCopier &copier, + jvmtiStackTraces *traces); + + void store_garbage_trace(const StackTraceData &trace); + + void free_garbage(); +}; + +StackTraceStorage* StackTraceStorage::internal_storage; + +// Statics for Sampler +double HeapMonitoring::_log_table[1 << FastLogNumBits]; +bool HeapMonitoring::_enabled; +AlwaysTrueClosure HeapMonitoring::_always_true; +jint HeapMonitoring::_monitoring_rate; + +// Cheap random number generator +uint64_t HeapMonitoring::_rnd; + +StackTraceStorage::StackTraceStorage() : + _allocated_traces(NULL), + _recent_garbage_traces(NULL), + _frequent_garbage_traces(NULL), + _max_storage(0), + _initialized(false) { + memset(&_stats, 0, sizeof(_stats)); +} + +void StackTraceStorage::free_garbage() { + StackTraceData **recent_garbage = NULL; + uint32_t recent_size = 0; + + StackTraceData **frequent_garbage = NULL; + uint32_t frequent_size = 0; + + if (_recent_garbage_traces != NULL) { + recent_garbage = _recent_garbage_traces->get_traces(); + recent_size = _recent_garbage_traces->size(); + } + + if (_frequent_garbage_traces != NULL) { + frequent_garbage = _frequent_garbage_traces->get_traces(); + frequent_size = _frequent_garbage_traces->size(); + } + + // Simple solution since this happens at exit. + // Go through the recent and remove any that only are referenced there. + for (uint32_t i = 0; i < recent_size; i++) { + StackTraceData *trace = recent_garbage[i]; + if (trace != NULL) { + trace->references--; + + if (trace->references == 0) { + StackTraceData::free_data(trace); + } + } + } + + // Then go through the frequent and remove those that are now only there. 
+ for (uint32_t i = 0; i < frequent_size; i++) { + StackTraceData *trace = frequent_garbage[i]; + if (trace != NULL) { + trace->references--; + + if (trace->references == 0) { + StackTraceData::free_data(trace); + } + } + } +} + +StackTraceStorage::~StackTraceStorage() { + delete _allocated_traces; + + free_garbage(); + delete _recent_garbage_traces; + delete _frequent_garbage_traces; + _initialized = false; +} + +void StackTraceStorage::initialize_storage(int max_storage) { + // In case multiple threads got locked and then 1 by 1 got through. + if (_initialized) { + return; + } + + _allocated_traces = new (ResourceObj::C_HEAP, mtInternal) + GrowableArray(128, true); + + _recent_garbage_traces = new MostRecentGarbageTraces(max_storage); + _frequent_garbage_traces = new FrequentGarbageTraces(max_storage); + + _max_storage = max_storage; + _initialized = true; +} + +void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) { + StackTraceData new_data(trace, o); + _stats.sample_count++; + _stats.stack_depth_accumulation += trace->frame_count; + _allocated_traces->append(new_data); +} + +size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, + OopClosure *f) { + size_t count = 0; + if (is_initialized()) { + int len = _allocated_traces->length(); + + // Compact the oop traces. Moves the live oops to the beginning of the + // growable array, potentially overwriting the dead ones. + int curr_pos = 0; + for (int i = 0; i < len; i++) { + StackTraceData &trace = _allocated_traces->at(i); + oop value = trace.obj; + if ((value != NULL && Universe::heap()->is_in_reserved(value)) && + is_alive->do_object_b(value)) { + // Update the oop to point to the new object if it is still alive. + f->do_oop(&(trace.obj)); + + // Copy the old trace, if it is still live. + _allocated_traces->at_put(curr_pos++, trace); + + count++; + } else { + // If the old trace is no longer live, add it to the list of + // recently collected garbage. 
+ store_garbage_trace(trace); + } + } + + // Zero out remaining array elements. Even though the call to trunc_to + // below truncates these values, zeroing them out is good practice. + StackTraceData zero_trace; + for (int i = curr_pos; i < len; i++) { + _allocated_traces->at_put(i, zero_trace); + } + + // Set the array's length to the number of live elements. + _allocated_traces->trunc_to(curr_pos); + } + + return count; +} + +bool StackTraceStorage::deep_copy(jvmtiStackTrace *to, + const StackTraceData *from) { + const jvmtiStackTrace *src = from->trace; + *to = *src; + + to->frames = + NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); + + if (to->frames == NULL) { + return false; + } + + memcpy(to->frames, + src->frames, + sizeof(jvmtiFrameInfo) * MaxStackDepth); + return true; +} + +// Called by the outside world; returns a copy of the stack traces +// (because we could be replacing them as the user handles them). +// The array is secretly null-terminated (to make it easier to reclaim). +void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) { + LiveStackTraceDataCopier copier(_allocated_traces); + copy_stack_traces(copier, traces); +} + +// See comment on get_all_stack_traces +void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) { + GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(), + _recent_garbage_traces->size()); + copy_stack_traces(copier, traces); +} + +// See comment on get_all_stack_traces +void StackTraceStorage::get_frequent_garbage_stack_traces( + jvmtiStackTraces *traces) { + GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(), + _frequent_garbage_traces->size()); + copy_stack_traces(copier, traces); +} + + +void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, + jvmtiStackTraces *traces) { + int len = copier.size(); + + // Create a new array to store the StackTraceData objects. + // + 1 for a NULL at the end. 
+ jvmtiStackTrace *t = + NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal); + if (t == NULL) { + traces->stack_traces = NULL; + traces->trace_count = 0; + return; + } + // +1 to have a NULL at the end of the array. + memset(t, 0, (len + 1) * sizeof(*t)); + + // Copy the StackTraceData objects into the new array. + int trace_count = 0; + for (int i = 0; i < len; i++) { + const StackTraceData *stack_trace = copier.get(i); + if (stack_trace != NULL) { + jvmtiStackTrace *to = &t[trace_count]; + if (!deep_copy(to, stack_trace)) { + continue; + } + trace_count++; + } + } + + traces->stack_traces = t; + traces->trace_count = trace_count; +} + +void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) { + StackTraceData *new_trace = new StackTraceData(); + *new_trace = trace; + + bool accepted = _recent_garbage_traces->store_trace(new_trace); + + // Accepted is on the right of the boolean to force the store_trace to happen. + accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted; + + if (!accepted) { + // No one wanted to use it. + delete new_trace; + } + + _stats.garbage_collected_samples++; +} + +// Delegate the initialization question to the underlying storage system. +bool HeapMonitoring::initialized() { + return StackTraceStorage::storage()->initialized(); +} + +// Delegate the initialization question to the underlying storage system. 
+bool *HeapMonitoring::initialized_address() { + return + const_cast(StackTraceStorage::storage()->initialized_address()); +} + +void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) { + StackTraceStorage::storage()->get_all_stack_traces(traces); +} + +void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) { + const jvmtiHeapSamplingStats& internal_stats = + StackTraceStorage::storage()->get_heap_sampling_stats(); + *stats = internal_stats; +} + +void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) { + StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces); +} + +void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) { + StackTraceStorage::storage()->get_garbage_stack_traces(traces); +} + +void HeapMonitoring::release_traces(jvmtiStackTraces *traces) { + jint trace_count = traces->trace_count; + jvmtiStackTrace *stack_traces = traces->stack_traces; + + for (jint i = 0; i < trace_count; i++) { + jvmtiStackTrace *current_trace = stack_traces + i; + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames); + } + + FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces); + traces->trace_count = 0; + traces->stack_traces = NULL; +} + +// Invoked by the GC to clean up old stack traces and remove old arrays +// of instrumentation that are still lying around. +size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, + OopClosure *f) { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + return StackTraceStorage::storage()->weak_oops_do(is_alive, f); +} + +void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) { + // Ignore if already enabled. + if (_enabled) { + return; + } + + _monitoring_rate = monitoring_rate; + + // Initalize and reset. + StackTraceStorage::initialize_stack_trace_storage(max_storage); + + // Populate the lookup table for fast_log2. + // This approximates the log2 curve with a step function. 
+ // Steps have height equal to log2 of the mid-point of the step. + for (int i = 0; i < (1 << FastLogNumBits); i++) { + double half_way = static_cast(i + 0.5); + _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0)); + } + + JavaThread *t = static_cast(Thread::current()); + _rnd = static_cast(reinterpret_cast(t)); + if (_rnd == 0) { + _rnd = 1; + } + _enabled = true; +} + +void HeapMonitoring::stop_profiling() { + _enabled = false; +} + +// Generates a geometric variable with the specified mean (512K by default). +// This is done by generating a random number between 0 and 1 and applying +// the inverse cumulative distribution function for an exponential. +// Specifically: Let m be the inverse of the sample rate, then +// the probability distribution function is m*exp(-mx) so the CDF is +// p = 1 - exp(-mx), so +// q = 1 - p = exp(-mx) +// log_e(q) = -mx +// -log_e(q)/m = x +// log_2(q) * (-log_e(2) * 1/m) = x +// In the code, q is actually in the range 1 to 2**26, hence the -26 below +void HeapMonitoring::pick_next_sample(size_t *ptr) { + _rnd = next_random(_rnd); + // Take the top 26 bits as the random number + // (This plus a 1<<58 sampling bound gives a max possible step of + // 5194297183973780480 bytes. In this case, + // for sample_parameter = 1<<19, max possible step is + // 9448372 bytes (24 bits). + const uint64_t prng_mod_power = 48; // Number of bits in prng + // The uint32_t cast is to prevent a (hard-to-reproduce) NAN + // under piii debug for some binaries. + double q = static_cast(_rnd >> (prng_mod_power - 26)) + 1.0; + // Put the computed p-value through the CDF of a geometric. + // For faster performance (save ~1/20th exec time), replace + // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705) + // The value 26.000705 is used rather than 26 to compensate + // for inaccuracies in FastLog2 which otherwise result in a + // negative answer. + double log_val = (fast_log2(q) - 26); + size_t rate = static_cast( + (0.0 < log_val ? 
0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1); + *ptr = rate; + + StackTraceStorage::storage()->accumulate_sample_rate(rate); +} + +// Called from the interpreter and C1 +void HeapMonitoring::object_alloc_unsized(oopDesc* o) { + JavaThread *thread = static_cast(Thread::current()); + object_alloc_do_sample(thread, o, o->size() << LogHeapWordSize); +} + +void HeapMonitoring::object_alloc(oopDesc* o, intx byte_size) { + JavaThread *thread = static_cast(Thread::current()); + assert(o->size() << LogHeapWordSize == static_cast(byte_size), + "Object size is incorrect."); + object_alloc_do_sample(thread, o, byte_size); +} + +// Called directly by C2 +void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) { +#if defined(X86) || defined(PPC) + JavaThread *thread = static_cast(t); + if (StackTraceStorage::storage()->is_initialized()) { + assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); + JavaThread *thread = static_cast(t); + + jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal); + if (trace == NULL) { + return; + } + + jvmtiFrameInfo *frames = + NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); + + if (frames == NULL) { + FREE_C_HEAP_OBJ(trace); + return; + } + + trace->frames = frames; + trace->thread_id = SharedRuntime::get_java_tid(thread); + trace->size = byte_size; + trace->frame_count = 0; + + if (thread->has_last_Java_frame()) { // just to be safe + vframeStream vfst(thread, true); + int count = 0; + while (!vfst.at_end() && count < MaxStackDepth) { + Method* m = vfst.method(); + frames[count].location = vfst.bci(); + frames[count].method = m->jmethod_id(); + count++; + + vfst.next(); + } + trace->frame_count = count; + } + + if (trace->frame_count> 0) { + // Success! + StackTraceStorage::storage()->add_trace(trace, o); + return; + } + + // Failure! 
+ FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames); + FREE_C_HEAP_OBJ(trace); + return; + } else { + // There is something like 64K worth of allocation before the VM + // initializes. This is just in the interests of not slowing down + // startup. + assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); + } +#else + Unimplemented(); +#endif +} diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP +#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP + +#include "gc/shared/referenceProcessor.hpp" +#include "runtime/sharedRuntime.hpp" + +// Support class for sampling heap allocations across the VM. 
+class HeapMonitoring : AllStatic { + private: + // Cheap random number generator + static uint64_t _rnd; + static bool _initialized; + static jint _monitoring_rate; + static bool _enabled; + + // Statics for the fast log + static const int FastLogNumBits = 10; + static const int FastLogMask = (1 << FastLogNumBits) - 1; + static double _log_table[1<(0)) << prng_mod_power); + return (PrngMult * rnd + prng_add) & prng_mod_mask; + } + + static inline double fast_log2(const double & d) { + assert(d>0, "bad value passed to assert"); + uint64_t x = 0; + memcpy(&x, &d, sizeof(uint64_t)); + const uint32_t x_high = x >> 32; + const uint32_t y = x_high >> (20 - FastLogNumBits) & FastLogMask; + const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023; + return exponent + _log_table[y]; + } + + public: + static void pick_next_sample(size_t *ptr); + + static void get_live_traces(jvmtiStackTraces* stack_traces); + static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); + static void get_garbage_traces(jvmtiStackTraces* stack_traces); + static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces); + static void release_traces(jvmtiStackTraces *trace_info); + static void initialize_profiling(jint monitoring_rate, jint max_storage); + static void stop_profiling(); + static bool initialized(); + static bool *initialized_address(); + + // Called when o is allocated, called by interpreter and C1. + static void object_alloc_unsized(oopDesc* o); + static void object_alloc(oopDesc* o, intx byte_size); + + // Called when o is allocated from C2 directly, + // we know the thread, and we have done the sampling. + static void object_alloc_do_sample(Thread *t, oopDesc *o, intx size_in_bytes); + + // Called to clean up oops that have been saved by our sampling function, + // but which no longer have other references in the heap. 
+ static size_t weak_oops_do(BoolObjectClosure* is_alive, + OopClosure *f); + static size_t weak_oops_do(OopClosure* oop_closure) { + return weak_oops_do(&_always_true, oop_closure); + } + + static bool enabled() { + return _enabled; + } +}; + +#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -622,6 +622,7 @@ TLAB_FIELD_OFFSET(start) TLAB_FIELD_OFFSET(end) + TLAB_FIELD_OFFSET(actual_end) TLAB_FIELD_OFFSET(top) TLAB_FIELD_OFFSET(pf_top) TLAB_FIELD_OFFSET(size) // desired_size diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/Frame.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/Frame.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/Frame.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +class Frame { + Frame(String method, String signature, String fileName, int lineNumber) { + this.method = method; + this.signature = signature; + this.fileName = fileName; + this.lineNumber = lineNumber; + } + + public String method; + public String signature; + public String fileName; + public int lineNumber; +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Checks the frequent garbage storage system. + * @build Frame + * @compile HeapMonitorFrequentTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorFrequentTest + */ + +import java.io.PrintStream; + +public class HeapMonitorFrequentTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkFrequentFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void runner(int max) { + int sum = 0; + for (int j = 0; j < max; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 60); + frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 71); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 85); + + enableSampling(); + // We are testing for the recent garbage sampler: + // First run for 10000 iterations to fill up the garbage sampler. + runner(10000); + + // Now because we are in a different stack frame line here, we can just re-use the same runner. + // Run for 3, we really should not see that many of these and most should be the first type. + runner(5000); + + // Both types should exist in frequent since it was frequent enough. 
+ int status = checkFrequentFrames(frames); + if (status == 0) { + throw new RuntimeException("Old frames no longer exist"); + } + + // Change the last frame only since the rest is identical. + frames[2].lineNumber = 89; + + status = checkFrequentFrames(frames); + if (status == 0) { + throw new RuntimeException("New frames not in the frequent sampling list"); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor API + * @build Frame + * @compile HeapMonitorNoCapabilityTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorNoCapabilityTest + */ + +import java.io.PrintStream; + +public class HeapMonitorNoCapabilityTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int allSamplingMethodsFail(); + + public static void main(String[] args) { + + int result = allSamplingMethodsFail(); + + if (result == 0) { + throw new RuntimeException("Some methods could be called without a capability."); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies if turning on/off/on the monitor wipes out the information. + * @build Frame + * @compile HeapMonitorOnOffTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorOnOffTest + */ + +import java.io.PrintStream; + +public class HeapMonitorOnOffTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. 
+ g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + native static int checkFrames(Frame[] frames); + native static int checkWipeOut(Frame[] frames); + native static int enableSampling(); + native static int disableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorOnOffTest.java", 53); + frames[1] = new Frame("wrapper", "()V", "HeapMonitorOnOffTest.java", 64); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorOnOffTest.java", 86); + + // Enable sampling and allocate. + enableSampling(); + wrapper(); + + // Now disable and re-enable. + disableSampling(); + + // Check that the data is still there: this allows to peruse samples after profiling. + int status = checkFrames(frames); + if (status != 0) { + throw new RuntimeException("Failed to find the traces before the wipe out."); + } + + // Enabling the sampling should wipe everything out. + enableSampling(); + + status = checkWipeOut(frames); + if (status != 0) { + throw new RuntimeException("Failed to wipe out the information."); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Checks the Recent garbage storage system. + * @build Frame + * @compile HeapMonitorRecentTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorRecentTest + */ + +import java.io.PrintStream; + +public class HeapMonitorRecentTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkLiveOrRecentFrames(Frame[] frames); + native static int checkLiveAndRecentFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. 
+ g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void runner(int max) { + int sum = 0; + for (int j = 0; j < max; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 61); + frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 72); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 86); + + enableSampling(); + // We are testing for the recent garbage sampler: + // First run for 10000 iterations to fill up the garbage sampler. + runner(10000); + + // Now because we are in a different stack frame line here, we can just re-use the same runner. + // Run for 5000 iterations; the recent garbage should now mostly come from this second call site. + runner(5000); + + // We should no longer have the initial frames. + int status = checkLiveOrRecentFrames(frames); + if (status != 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + + // Change the last frame only since the rest is identical. + frames[2].lineNumber = 90; + + // We should see those new frames. + status = checkLiveAndRecentFrames(frames); + if (status == 0) { + throw new RuntimeException("Expected frames were not found by the agent: " + status); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatTest.java new file mode 100644 diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor API + * @build Frame + * @compile HeapMonitorTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorTest + */ + +import java.io.PrintStream; + +public class HeapMonitorTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. 
+ g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorTest.java", 60); + frames[1] = new Frame("wrapper", "()V", "HeapMonitorTest.java", 71); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorTest.java", 83); + + enableSampling(); + wrapper(); + + int status = checkFrames(frames); + if (status != 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -0,0 +1,693 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include <stdio.h> +#include <string.h> +#include "jvmti.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef JNI_ENV_ARG + +#ifdef __cplusplus +#define JNI_ENV_ARG(x, y) y +#define JNI_ENV_PTR(x) x +#else +#define JNI_ENV_ARG(x,y) x, y +#define JNI_ENV_PTR(x) (*x) +#endif + +#endif + +#define PASSED 0 +#define FAILED 2 + +#define MAX_TRACES 400 + +static const char *EXC_CNAME = "java/lang/Exception"; +static jvmtiEnv *jvmti = NULL; + +static int check_error(jvmtiError err, const char* s) { + if (err != JVMTI_ERROR_NONE) { + printf(" ## %s error: %d\n", s, err); + return 1; + } + return 0; +} + +static int check_capability_error(jvmtiError err, const char* s) { + if (err != JVMTI_ERROR_NONE) { + if (err == JVMTI_ERROR_MUST_POSSESS_CAPABILITY) { + return 0; + } + printf(" ## %s error: %d\n", s, err); + return 1; + } + return 1; +} + +static +jint throw_exc(JNIEnv *env, char *msg) { + jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME)); + + if (exc_class == NULL) { + printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME); + return -1; + } + return JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg); +} + +static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved); + +JNIEXPORT +jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) { + return Agent_Initialize(jvm, options, reserved); +} + +JNIEXPORT +jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) { + return Agent_Initialize(jvm, options, reserved); +} + +JNIEXPORT +jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) { + return JNI_VERSION_1_8; +} + +JNIEXPORT void JNICALL OnVMInit(jvmtiEnv *jvmti, JNIEnv *jni_env, jthread thread) { +} + +JNIEXPORT void JNICALL OnClassLoad(jvmtiEnv *jvmti_env, JNIEnv *jni_env, + jthread thread, jclass klass) { + // NOP. 
+} + +JNIEXPORT void JNICALL OnClassPrepare(jvmtiEnv *jvmti_env, JNIEnv *jni_env, + jthread thread, jclass klass) { + // We need to do this to "prime the pump", as it were -- make sure + // that all of the methodIDs have been initialized internally, for + // AsyncGetCallTrace. + jint method_count; + jmethodID *methods = 0; + jvmtiError err = (*jvmti)->GetClassMethods(jvmti, klass, &method_count, &methods); + if ((err != JVMTI_ERROR_NONE) && (err != JVMTI_ERROR_CLASS_NOT_PREPARED)) { + // JVMTI_ERROR_CLASS_NOT_PREPARED is okay because some classes may + // be loaded but not prepared at this point. + throw_exc(jni_env, "Failed to create method IDs for methods in class\n"); + } +} + +static +jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) { + jint res; + + res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti), + JVMTI_VERSION_9); + if (res != JNI_OK || jvmti == NULL) { + printf(" Error: wrong result of a valid call to GetEnv!\n"); + return JNI_ERR; + } + + jvmtiEventCallbacks callbacks; + memset(&callbacks, 0, sizeof(callbacks)); + + callbacks.VMInit = &OnVMInit; + callbacks.ClassLoad = &OnClassLoad; + callbacks.ClassPrepare = &OnClassPrepare; + + jvmtiCapabilities caps; + memset(&caps, 0, sizeof(caps)); + // Get line numbers, sample heap, and filename for the test. 
+ caps.can_get_line_numbers = 1; + caps.can_sample_heap= 1; + caps.can_get_source_file_name = 1; + if (check_error((*jvmti)->AddCapabilities(jvmti, &caps), + "Add capabilities\n")){ + return JNI_ERR; + } + + if (check_error((*jvmti)->SetEventCallbacks(jvmti, &callbacks, + sizeof(jvmtiEventCallbacks)), + " Set Event Callbacks")) { + return JNI_ERR; + } + if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_VM_INIT, NULL), + "Set Event for VM Init")) { + return JNI_ERR; + } + if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_CLASS_LOAD, NULL), + "Set Event for Class Load")) { + return JNI_ERR; + } + if (check_error( (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_CLASS_PREPARE, NULL), + "Set Event for Class Prepare")) { + return JNI_ERR; + } + + return JNI_OK; +} + +// Given a method and a location, this method gets the line number. +// Kind of expensive, comparatively. +static +jint get_line_number(jvmtiEnv *jvmti, jmethodID method, jlocation location) { + // The location is -1 if the bci isn't known or -3 for a native method. + if (location == -1 || location == -3) { + return -1; + } + + // Read the line number table. + jvmtiLineNumberEntry *table_ptr = 0; + jint line_number_table_entries; + int jvmti_error = (*jvmti)->GetLineNumberTable(jvmti, method, + &line_number_table_entries, + &table_ptr); + + if (JVMTI_ERROR_NONE != jvmti_error) { + return -1; + } + if (line_number_table_entries <= 0) { + return -1; + } + if (line_number_table_entries == 1) { + return table_ptr[0].line_number; + } + + // Go through all the line numbers... + jint last_location = table_ptr[0].start_location; + int l; + for (l = 1; l < line_number_table_entries; l++) { + // ... and if you see one that is in the right place for your + // location, you've found the line number! 
+ if ((location < table_ptr[l].start_location) && + (location >= last_location)) { + return table_ptr[l - 1].line_number; + } + last_location = table_ptr[l].start_location; + } + + if (location >= last_location) { + return table_ptr[line_number_table_entries - 1].line_number; + } else { + return -1; + } +} + +typedef struct _ExpectedContentFrame { + const char *name; + const char *signature; + const char *file_name; + int line_number; +} ExpectedContentFrame; + +static jint check_sample_content(JNIEnv *env, + jvmtiStackTrace *trace, + ExpectedContentFrame *expected, + int expected_count) { + int i; + + if (expected_count > trace->frame_count) { + return 0; + } + + for (i = 0; i < expected_count; i++) { + // Get basic information out of the trace. + int bci = trace->frames[i].location; + jmethodID methodid = trace->frames[i].method; + char *name = NULL, *signature = NULL, *file_name = NULL; + + if (bci < 0) { + return 0; + } + + // Transform into usable information. + int line_number = get_line_number(jvmti, methodid, bci); + (*jvmti)->GetMethodName(jvmti, methodid, &name, &signature, 0); + + jclass declaring_class; + if (JVMTI_ERROR_NONE != + (*jvmti)->GetMethodDeclaringClass(jvmti, methodid, &declaring_class)) { + return 0; + } + + jvmtiError err = (*jvmti)->GetSourceFileName(jvmti, declaring_class, + &file_name); + if (err != JVMTI_ERROR_NONE) { + return 0; + } + + // Compare now, none should be NULL. 
+ if (name == NULL) { + return 0; + } + + if (file_name == NULL) { + return 0; + } + + if (signature == NULL) { + return 0; + } + + if (strcmp(name, expected[i].name) || + strcmp(signature, expected[i].signature) || + strcmp(file_name, expected[i].file_name) || + line_number != expected[i].line_number) { + return 0; + } + } + + return 1; +} + +static jint compare_samples(JNIEnv* env, jvmtiStackTrace* traces, int trace_count, + ExpectedContentFrame* expected_content, size_t size) { + // We expect the code to record correctly the bci, retrieve the line + // number, have the right method and the class name of the first frames. + int i; + for (i = 0; i < trace_count; i++) { + jvmtiStackTrace *trace = traces + i; + if (check_sample_content(env, trace, expected_content, size)) { + // At least one frame matched what we were looking for. + return 1; + } + } + + return 0; +} + +static jint check_samples(JNIEnv* env, ExpectedContentFrame* expected, + size_t size, + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*)) { + jvmtiStackTraces traces; + jvmtiError error = get_traces(jvmti, &traces); + + if (error != JVMTI_ERROR_NONE) { + return 0; + } + + int result = compare_samples(env, traces.stack_traces, traces.trace_count, + expected, size); + (*jvmti)->ReleaseTraces(jvmti, &traces); + return result; +} + +static jint frames_exist_live(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetLiveTraces); +} + +static jint frames_exist_recent(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetGarbageTraces); +} + +static jint frames_exist_frequent(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetFrequentGarbageTraces); +} + +// Static native API for various tests. 
+static void fill_native_frames(JNIEnv* env, jobjectArray frames, + ExpectedContentFrame* native_frames, size_t size) { + size_t i; + for(i = 0; i < size; i++) { + jobject obj = (*env)->GetObjectArrayElement(env, frames, i); + jclass frame_class = (*env)->GetObjectClass(env, obj); + jfieldID line_number_field_id = (*env)->GetFieldID(env, frame_class, "lineNumber", "I"); + int line_number = (*env)->GetIntField(env, obj, line_number_field_id); + + jfieldID string_id = (*env)->GetFieldID(env, frame_class, "method", "Ljava/lang/String;"); + jstring string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* method = (*env)->GetStringUTFChars(env, string_object, 0); + + string_id = (*env)->GetFieldID(env, frame_class, "fileName", "Ljava/lang/String;"); + string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* file_name = (*env)->GetStringUTFChars(env, string_object, 0); + + string_id = (*env)->GetFieldID(env, frame_class, "signature", "Ljava/lang/String;"); + string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* signature= (*env)->GetStringUTFChars(env, string_object, 0); + + native_frames[i].name = method; + native_frames[i].file_name = file_name; + native_frames[i].signature = signature; + native_frames[i].line_number = line_number; + } +} + +static jint checkAnd(JNIEnv *env, jobjectArray frames, int live, int recent, + int frequent) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + int result = 1; + + if (live) { + result = frames_exist_live(env, native_frames, size); + } + + if (recent) { + result = result && + frames_exist_recent(env, native_frames, size); + } + + if (frequent) { + result = result && + frames_exist_frequent(env, native_frames, size); + } + + return result; +} + +static jint checkOr(JNIEnv *env, jobjectArray frames, int live, int recent, + int frequent) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. + jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + int result = 0; + + if (live) { + result = frames_exist_live(env, native_frames, size); + } + + if (recent) { + result = result || + frames_exist_recent(env, native_frames, size); + } + + if (frequent) { + result = result || + frames_exist_frequent(env, native_frames, size); + } + + return result; +} + +static jint checkAll(JNIEnv *env, jobjectArray frames) { + return checkAnd(env, frames, 1, 1, 1); +} + +static jint checkNone(JNIEnv *env, jobjectArray frames) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + if ((!frames_exist_live(env, native_frames, size)) && + (!frames_exist_recent(env, native_frames, size)) && + (!frames_exist_frequent(env, native_frames, size))) { + return 1; + } + return 0; +} + +static void enable_sampling() { + check_error((*jvmti)->StartHeapSampling(jvmti, 1 << 19, MAX_TRACES), + "Start Heap Sampling"); +} + +static void enable_sampling_with_rate(int rate) { + check_error((*jvmti)->StartHeapSampling(jvmti, rate, MAX_TRACES), + "Start Heap Sampling"); +} + +static void disable_sampling() { + check_error((*jvmti)->StopHeapSampling(jvmti), "Stop Heap Sampling"); +} + +// HeapMonitorTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +// HeapMonitorOnOffTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorOnOffTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorOnOffTest_checkWipeOut(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in none of the parts. 
+ if (!checkNone(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorOnOffTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorOnOffTest_disableSampling(JNIEnv *env, jclass cls) { + disable_sampling(); +} + +// HeapMonitorRecentTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkLiveOrRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkOr(env, frames, 1, 1, 0)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkLiveAndRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkAnd(env, frames, 1, 1, 0)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorRecentTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +// HeapMonitorFrequentTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorFrequentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. 
+ if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorFrequentTest_checkFrequentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkAnd(env, frames, 0, 0, 1)) { + return PASSED; + } + return FAILED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorFrequentTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorNoCapabilityTest_allSamplingMethodsFail(JNIEnv *env, jclass cls) { + jvmtiCapabilities caps; + memset(&caps, 0, sizeof(caps)); + caps.can_sample_heap= 1; + if (check_error((*jvmti)->RelinquishCapabilities(jvmti, &caps), + "Add capabilities\n")){ + return 0; + } + + if (check_capability_error((*jvmti)->StartHeapSampling(jvmti, 1<<19, + MAX_TRACES), + "Start Heap Sampling")) { + return 0; + } + + if (check_capability_error((*jvmti)->StopHeapSampling(jvmti), + "Stop Heap Sampling")) { + return 0; + } + + if (check_capability_error((*jvmti)->ReleaseTraces(jvmti, NULL), + "Release Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetHeapSamplingStats(jvmti, NULL), + "Get Heap Sampling Stats")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL), + "Get Garbage Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL), + "Get Frequent Garbage Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetLiveTraces(jvmti, NULL), + "Get Live Traces")) { + return 0; + } + + // Calling enable sampling should fail now. 
+ return 1; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatSimpleTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +static jint stats_are_zero() { + jvmtiHeapSamplingStats stats; + check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), + "Heap Sampling Statistics"); + + jvmtiHeapSamplingStats zero; + memset(&zero, 0, sizeof(zero)); + return memcmp(&stats, &zero, sizeof(zero)) == 0; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorStatSimpleTest_statsNull(JNIEnv *env, jclass cls) { + return stats_are_zero(); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_disableSampling(JNIEnv *env, jclass cls) { + disable_sampling(); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_enableSampling(JNIEnv *env, jclass cls, jint rate) { + enable_sampling_with_rate(rate); +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_statsNull(JNIEnv *env, jclass cls) { + return stats_are_zero(); +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_statsHaveSamples(JNIEnv *env, + jclass cls, + int expected, + int percent_error) { + jvmtiHeapSamplingStats stats; + check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), + "Heap Sampling Statistics"); + + fprintf(stderr, "Statistics show:\n"); + fprintf(stderr, "\tCollected samples: %ld\n\tGarbage collected samples: %ld\n", + stats.sample_count, stats.garbage_collected_samples); + fprintf(stderr, "\tSample rate accumulated: %ld\n\tSample Rate Count: %ld\n", + stats.sample_rate_accumulation, stats.sample_rate_count); + fprintf(stderr, "\tStack depth accumulation: %ld\n", + stats.stack_depth_accumulation); + + fprintf(stderr, "Expected is %d\n", expected); + double diff_ratio = (stats.sample_count - expected); + diff_ratio = (diff_ratio < 0) ? 
-diff_ratio : diff_ratio; + diff_ratio /= expected; + + fprintf(stderr, "Diff ratio is %f\n", diff_ratio); + + return diff_ratio * 100 > percent_error; +} + +#ifdef __cplusplus +} +#endif # HG changeset patch # User Jean Christophe Beyler # Date 1506112017 25200 # Fri Sep 22 13:26:57 2017 -0700 # Node ID 6fd5e543efdb0850b0ede33629a8cc51fb812294 # Parent 7d88c86b55de405655cfaa65ab7b1724c56758da [mq]: heap9a diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -4140,7 +4140,7 @@ } void G1CollectedHeap::process_heap_monitoring() { - log_develop_trace(gc, ref)("HeapSampling [other] : heap monitoring processing"); + log_develop_trace(gc, ref)("Heap Sampler: heap monitoring processing"); G1STWIsAliveClosure is_alive(this); G1KeepAliveClosure keep_alive(this); HeapMonitoring::weak_oops_do(&is_alive, &keep_alive); diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -304,10 +304,12 @@ // - Both of the above reasons are true at the same time. if (HeapMonitoring::enabled()) { if (thread->tlab().should_sample()) { + HeapWord *end = thread->tlab().end(); + thread->tlab().set_back_actual_end(); + // If we don't have an object yet, try to allocate it. if (obj == NULL) { // The tlab could still have space after this sample. - thread->tlab().set_back_actual_end(); obj = thread->tlab().allocate(size); } @@ -318,9 +320,9 @@ // Object is allocated, sample it now. HeapMonitoring::object_alloc_do_sample(thread, reinterpret_cast(obj), - size); + size * HeapWordSize); // Pick a next sample in this case, we allocated right. 
- thread->tlab().pick_next_sample(); + thread->tlab().pick_next_sample(thread->tlab().top() - end); } } } @@ -330,6 +332,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) { HeapWord* obj = handle_heap_sampling(thread, NULL, size); + bool should_sample = thread->tlab().should_sample(); if (obj != NULL) { return obj; @@ -374,8 +377,12 @@ #endif // ASSERT } thread->tlab().fill(obj, obj + size, new_tlab_size); - handle_heap_sampling(thread, obj, size); - return obj; + + if (should_sample) { + return handle_heap_sampling(thread, obj, size); + } else { + return obj; + } } void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) { diff --git a/src/hotspot/share/gc/shared/referenceProcessor.cpp b/src/hotspot/share/gc/shared/referenceProcessor.cpp --- a/src/hotspot/share/gc/shared/referenceProcessor.cpp +++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp @@ -262,8 +262,8 @@ // Heap Monitoring references size_t handled; { - GCTraceTime(Debug, gc, ref) tt("Heap Monitoring Weak Reference", gc_timer); - handled = process_phaseHeapSampling(is_alive, keep_alive, complete_gc, task_executor); + GCTraceTime(Debug, gc, ref) tt("Heap Sampler Weak Reference", phase_times->gc_timer()); + handled = process_phaseHeapSampling(is_alive, keep_alive, complete_gc); } phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000); @@ -302,13 +302,9 @@ size_t ReferenceProcessor::process_phaseHeapSampling( BoolObjectClosure* is_alive, OopClosure* keep_alive, - VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor) { + VoidClosure* complete_gc) { size_t count = 0; if (HeapMonitoring::enabled()) { - if (task_executor != NULL) { - task_executor->set_single_threaded_mode(); - } count = HeapMonitoring::weak_oops_do(is_alive, keep_alive); complete_gc->do_void(); } diff --git a/src/hotspot/share/gc/shared/referenceProcessor.hpp b/src/hotspot/share/gc/shared/referenceProcessor.hpp --- 
a/src/hotspot/share/gc/shared/referenceProcessor.hpp +++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp @@ -252,8 +252,7 @@ size_t process_phaseHeapSampling(BoolObjectClosure* is_alive, OopClosure* keep_alive, - VoidClosure* complete_gc, - AbstractRefProcTaskExecutor* task_executor); + VoidClosure* complete_gc); // Work methods used by the method process_discovered_reflist // Phase1: keep alive all those referents that are otherwise diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -174,8 +174,21 @@ _number_of_refills++; print_stats("fill"); assert(top <= start + new_size - alignment_reserve(), "size too small"); + + // Remember old bytes until sample for the next tlab only if this is our first + // actual refill. + size_t old_bytes_until_sample = 0; + if (_number_of_refills > 1) { + old_bytes_until_sample = bytes_until_sample(); + } + initialize(start, top, start + new_size - alignment_reserve()); + if (old_bytes_until_sample > 0) { + set_bytes_until_sample(old_bytes_until_sample); + set_sample_end(); + } + // Reset amount of internal fragmentation set_refill_waste_limit(initial_refill_waste_limit()); } @@ -313,16 +326,7 @@ guarantee(p == top(), "end of last object must match end of space"); } -void ThreadLocalAllocBuffer::pick_next_sample() { - if (!HeapMonitoring::enabled()) { - return; - } - - if (bytes_until_sample() == 0) { - HeapMonitoring::pick_next_sample(bytes_until_sample_addr()); - } - - // Finally, fix up the sampling bytes left and _end. 
+void ThreadLocalAllocBuffer::set_sample_end() { size_t heap_words_remaining = _end - _top; size_t bytes_left = bytes_until_sample(); size_t words_until_sample = bytes_left / HeapWordSize; @@ -336,6 +340,25 @@ bytes_left -= heap_words_remaining * HeapWordSize; set_bytes_until_sample(bytes_left); } +} + +void ThreadLocalAllocBuffer::pick_next_sample(size_t diff) { + if (!HeapMonitoring::enabled()) { + return; + } + + if (bytes_until_sample() == 0) { + HeapMonitoring::pick_next_sample(bytes_until_sample_addr()); + } + + if (diff > 0) { + // Try to correct sample size by removing extra space from last allocation. + if (bytes_until_sample() > diff * HeapWordSize) { + set_bytes_until_sample(bytes_until_sample() - diff * HeapWordSize); + } + } + + set_sample_end(); log_trace(gc, tlab)("TLAB picked next sample: thread: " INTPTR_FORMAT " [id: %2d]" " start: %p top: %p end: %p actual_end: %p slow_path_end: %p", @@ -367,16 +390,23 @@ return; } - set_bytes_until_sample(bytes_until_sample() - size); + size_t size_in_bytes = size * HeapWordSize; + if (bytes_until_sample() > size_in_bytes) { + set_bytes_until_sample(bytes_until_sample() - size_in_bytes); + } else { + // Technically this is not exactly right: we should probably remember how many bytes we + // went negative by, and reduce our next sample size accordingly. + set_bytes_until_sample(0); + } // Should we sample now? 
- set_back_actual_end(); if (should_sample()) { HeapMonitoring::object_alloc_do_sample(thread, reinterpret_cast(result), - size); + size_in_bytes); + set_back_actual_end(); + pick_next_sample(); } - pick_next_sample(); } HeapWord* ThreadLocalAllocBuffer::hard_end() { diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp @@ -132,7 +132,6 @@ HeapWord* end() const { return _end; } HeapWord* slow_path_end() const { return _slow_path_end; } HeapWord* actual_end() const { return _actual_end; } - HeapWord* hard_end(); HeapWord* top() const { return _top; } HeapWord* pf_top() const { return _pf_top; } @@ -180,7 +179,8 @@ void fill(HeapWord* start, HeapWord* top, size_t new_size); void initialize(); - void pick_next_sample(); + void pick_next_sample(size_t diff = 0); + void set_sample_end(); void set_back_actual_end(); void handle_sample(Thread* thread, HeapWord* result, size_t size); size_t bytes_until_sample() { return _bytes_until_sample; } diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ b/src/hotspot/share/prims/jvmti.xml @@ -11640,6 +11640,9 @@ The monitoring rate used for sampling. The sampler will use a statistical approach to provide in average sampling every allocated bytes. + + Note: a low monitoring rate will incur a higher overhead; therefore, the sampler should + only be used with the understanding that it may impact performance. 
diff --git a/src/hotspot/share/prims/jvmtiHeapTransition.hpp b/src/hotspot/share/prims/jvmtiHeapTransition.hpp --- a/src/hotspot/share/prims/jvmtiHeapTransition.hpp +++ b/src/hotspot/share/prims/jvmtiHeapTransition.hpp @@ -25,6 +25,8 @@ #ifndef SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP #define SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP +#include "runtime/interfaceSupport.hpp" + // A RAII class that handles transitions from the agent into the VM. class HeapThreadTransition : StackObj { private: diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -23,9 +23,21 @@ */ #include "precompiled.hpp" -#include "prims/forte.hpp" + +#include "gc/shared/collectedHeap.hpp" +#include "memory/universe.hpp" #include "runtime/heapMonitoring.hpp" +#include "runtime/vframe.hpp" +// DONE: +// merged printouts +// broke up the one-liner +// talk about synchro +// cleaned up old entry points for C1/interpreter +// add statistics per GC and log start up initialization. +// removed the null pointer check during the weak_oops_do walk +// cleaned up the task_executor +// fixed the compilation using the option --disable-precompiled-header static const int MaxStackDepth = 64; @@ -209,7 +221,8 @@ } static void reset_stack_trace_storage() { - delete internal_storage, internal_storage = NULL; + delete internal_storage; + internal_storage = NULL; } bool is_initialized() { @@ -407,8 +420,8 @@ for (int i = 0; i < len; i++) { StackTraceData &trace = _allocated_traces->at(i); oop value = trace.obj; - if ((value != NULL && Universe::heap()->is_in_reserved(value)) && - is_alive->do_object_b(value)) { + if (Universe::heap()->is_in_reserved(value) + && is_alive->do_object_b(value)) { // Update the oop to point to the new object if it is still alive. 
f->do_oop(&(trace.obj)); @@ -647,20 +660,6 @@ StackTraceStorage::storage()->accumulate_sample_rate(rate); } -// Called from the interpreter and C1 -void HeapMonitoring::object_alloc_unsized(oopDesc* o) { - JavaThread *thread = static_cast(Thread::current()); - object_alloc_do_sample(thread, o, o->size() << LogHeapWordSize); -} - -void HeapMonitoring::object_alloc(oopDesc* o, intx byte_size) { - JavaThread *thread = static_cast(Thread::current()); - assert(o->size() << LogHeapWordSize == static_cast(byte_size), - "Object size is incorrect."); - object_alloc_do_sample(thread, o, byte_size); -} - -// Called directly by C2 void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) { #if defined(X86) || defined(PPC) JavaThread *thread = static_cast(t); diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -68,24 +68,34 @@ } public: + /* + * General note: currently none of these methods are deemed thread-safe. + */ + + // First method called by user to start the profiler: + // - Note: the lower the monitoring rate, the higher the overhead incurred. + static void initialize_profiling(jint monitoring_rate, jint max_storage); + + // Pick the next sample for a given size_t pointer using a geometric variable + // with specified mean. The specified mean is provided via the + // initialize_profiling method. static void pick_next_sample(size_t *ptr); + // Get live/garbage traces and provide a method to release the traces. 
static void get_live_traces(jvmtiStackTraces* stack_traces); - static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); static void get_garbage_traces(jvmtiStackTraces* stack_traces); static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces); static void release_traces(jvmtiStackTraces *trace_info); - static void initialize_profiling(jint monitoring_rate, jint max_storage); + + static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); static void stop_profiling(); + + // Is the profiler initialized and where is the address to the initialized + // boolean. static bool initialized(); static bool *initialized_address(); - // Called when o is allocated, called by interpreter and C1. - static void object_alloc_unsized(oopDesc* o); - static void object_alloc(oopDesc* o, intx byte_size); - - // Called when o is allocated from C2 directly, - // we know the thread, and we have done the sampling. + // Called when o is to be sampled from a given thread and a given size. static void object_alloc_do_sample(Thread *t, oopDesc *o, intx size_in_bytes); // Called to clean up oops that have been saved by our sampling function, diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics + * @build Frame + * @compile HeapMonitorStatCorrectnessTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatCorrectnessTest + */ + +import java.io.PrintStream; + +public class HeapMonitorStatCorrectnessTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + // Do 100000 iterations and expect maxIteration / multiplier samples. 
+ public static final int maxIteration = 100000; + public static int array[]; + + native static int statsNull(); + native static int statsHaveSamples(int expected, int percentError); + native static int enableSampling(int rate); + native static int disableSampling(); + + + private static void allocate(int size) { + System.out.println("With a size of " + size + ", execute " + maxIteration + " iterations"); + for (int j = 0; j < maxIteration; j++) { + array = new int[size]; + } + } + + public static void main(String[] args) { + int sizes[] = {1000, 10000, 100000}; + + for (int i = 0; i < sizes.length; i++) { + int currentSize = sizes[i]; + System.out.println("Testing size " + currentSize); + + // 111 is as good a number as any. + final int samplingMultiplier = 111; + enableSampling(samplingMultiplier * currentSize); + + if (statsNull() == 0) { + throw new RuntimeException("Statistics should be null to begin with."); + } + + allocate(currentSize); + + // For simplification, we ignore the array memory usage for array internals (with the array + // sizes requested, it should be a negligible oversight). + // + // That means that with maxIteration, the loop in the method allocate requests: + // maxIteration * currentSize * 4 bytes (4 for integers) + // + // Via enableSampling, the code requests a sample every samplingMultiplier * currentSize bytes. + // + // Therefore, the expected sample number is: + // (maxIteration * currentSize * 4) / (samplingMultiplier * currentSize); + double expected = maxIteration; + expected *= 4; + expected /= samplingMultiplier; + + // 10% error ensures a sanity test without becoming flaky. 
+ if (statsHaveSamples((int) expected, 10) != 0) { + throw new RuntimeException("Statistics should show about " + expected + " samples."); + } + + disableSampling(); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics + * @build Frame + * @compile HeapMonitorStatSimpleTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatSimpleTest + */ + +import java.io.PrintStream; + +public class HeapMonitorStatSimpleTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int statsNull(); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + if (statsNull() == 0) { + throw new RuntimeException("Statistics should be null to begin with."); + } + + enableSampling(); + wrapper(); + + if (statsNull() != 0) { + throw new RuntimeException("Statistics should not be null now."); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatTest.java deleted file mode 100644 # HG changeset patch # User Jean Christophe Beyler # Date 1506963649 25200 # Mon Oct 02 10:00:49 2017 -0700 # Node ID 6ad8895fa513099eb2ceb07ffb9586cef1950db5 # Parent 6fd5e543efdb0850b0ede33629a8cc51fb812294 [mq]: heap10 diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ 
b/src/hotspot/share/prims/jvmti.xml @@ -11539,7 +11539,7 @@ - + jvmtiFrameInfo Pointer to the call frames. @@ -11645,10 +11645,10 @@ only be used when knowing it may impact performance. - + - The maximum storage used for the sampler. By default, the value is 200. + The maximum storage used for the GC samples in the sampler. By default, the value is 200. diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -1951,13 +1951,13 @@ // Start the sampler. jvmtiError -JvmtiEnv::StartHeapSampling(jint monitoring_rate, jint max_storage) { +JvmtiEnv::StartHeapSampling(jint monitoring_rate, jint max_gc_storage) { if (monitoring_rate < 0) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } HeapThreadTransition htt(Thread::current()); - HeapMonitoring::initialize_profiling(monitoring_rate, max_storage); + HeapMonitoring::initialize_profiling(monitoring_rate, max_gc_storage); return JVMTI_ERROR_NONE; } /* end StartHeapSampling */ diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -29,17 +29,7 @@ #include "runtime/heapMonitoring.hpp" #include "runtime/vframe.hpp" -// DONE: -// merged printouts -// broke up the one-liner -// talk about synchro -// cleaned up old entry points for C1/interpreter -// add statistics per GC and log start up initialization. -// removed the null pointer check during the weak_oops_do walk -// cleaned up the task_executor -// fixed the compilation using the option --disable-precompiled-header - -static const int MaxStackDepth = 64; +const int MaxStackDepth = 1024; // Internal data structure representing traces. struct StackTraceData : CHeapObj { @@ -262,7 +252,7 @@ jvmtiHeapSamplingStats _stats; // Maximum amount of storage provided by the JVMTI call initialize_profiling. 
- int _max_storage; + int _max_gc_storage; static StackTraceStorage* internal_storage; volatile bool _initialized; @@ -328,7 +318,7 @@ _allocated_traces(NULL), _recent_garbage_traces(NULL), _frequent_garbage_traces(NULL), - _max_storage(0), + _max_gc_storage(0), _initialized(false) { memset(&_stats, 0, sizeof(_stats)); } @@ -385,7 +375,7 @@ _initialized = false; } -void StackTraceStorage::initialize_storage(int max_storage) { +void StackTraceStorage::initialize_storage(int max_gc_storage) { // In case multiple threads got locked and then 1 by 1 got through. if (_initialized) { return; @@ -394,10 +384,10 @@ _allocated_traces = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(128, true); - _recent_garbage_traces = new MostRecentGarbageTraces(max_storage); - _frequent_garbage_traces = new FrequentGarbageTraces(max_storage); + _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage); + _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage); - _max_storage = max_storage; + _max_gc_storage = max_gc_storage; _initialized = true; } @@ -456,7 +446,7 @@ *to = *src; to->frames = - NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); + NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal); if (to->frames == NULL) { return false; @@ -464,7 +454,7 @@ memcpy(to->frames, src->frames, - sizeof(jvmtiFrameInfo) * MaxStackDepth); + sizeof(jvmtiFrameInfo) * src->frame_count); return true; } @@ -593,7 +583,8 @@ return StackTraceStorage::storage()->weak_oops_do(is_alive, f); } -void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) { +void HeapMonitoring::initialize_profiling(jint monitoring_rate, + jint max_gc_storage) { // Ignore if already enabled. if (_enabled) { return; @@ -602,7 +593,7 @@ _monitoring_rate = monitoring_rate; // Initalize and reset. 
- StackTraceStorage::initialize_stack_trace_storage(max_storage); + StackTraceStorage::initialize_stack_trace_storage(max_gc_storage); // Populate the lookup table for fast_log2. // This approximates the log2 curve with a step function. diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -74,7 +74,7 @@ // First method called by user to start the profiler: // - Note: the lower the monitoring rate, the higher the overhead incurred. - static void initialize_profiling(jint monitoring_rate, jint max_storage); + static void initialize_profiling(jint monitoring_rate, jint max_gc_storage); // Pick the next sample for a given size_t pointer using a geometric variable // with specified mean. The specified mean is provided via the diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics + * @build Frame + * @compile HeapMonitorStatRateTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatRateTest + */ + +import java.io.PrintStream; + +public class HeapMonitorStatRateTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static double getAverageRate(); + native static int enableSampling(int rate); + native static void disableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 5000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + int[] tab = {1024, 16384, 524288}; + + for (int rateIdx = 0; rateIdx < tab.length; rateIdx++) { + int rate = tab[rateIdx]; + + enableSampling(rate); + wrapper(); + disableSampling(); + + double calculatedRate = getAverageRate(); + + double error = rate - calculatedRate; + error = error < 0 ? 
-error : error; + + double errorPercentage = error / rate * 100; + + if (errorPercentage > 5) { + throw new RuntimeException("Rate average over 5% for rate " + rate); + } + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -361,7 +361,7 @@ } } -static jint checkAnd(JNIEnv *env, jobjectArray frames, int live, int recent, +static jint check_and(JNIEnv *env, jobjectArray frames, int live, int recent, int frequent) { jobject loader = NULL; @@ -398,7 +398,7 @@ return result; } -static jint checkOr(JNIEnv *env, jobjectArray frames, int live, int recent, +static jint check_or(JNIEnv *env, jobjectArray frames, int live, int recent, int frequent) { jobject loader = NULL; @@ -436,7 +436,7 @@ } static jint checkAll(JNIEnv *env, jobjectArray frames) { - return checkAnd(env, frames, 1, 1, 1); + return check_and(env, frames, 1, 1, 1); } static jint checkNone(JNIEnv *env, jobjectArray frames) { @@ -534,7 +534,7 @@ JNIEXPORT jint JNICALL Java_MyPackage_HeapMonitorRecentTest_checkLiveOrRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - if (checkOr(env, frames, 1, 1, 0)) { + if (check_or(env, frames, 1, 1, 0)) { return FAILED; } return PASSED; @@ -542,7 +542,7 @@ JNIEXPORT jint JNICALL Java_MyPackage_HeapMonitorRecentTest_checkLiveAndRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - if (checkAnd(env, frames, 1, 1, 0)) { + if (check_and(env, frames, 1, 1, 0)) { return FAILED; } return PASSED; @@ -565,7 +565,7 @@ JNIEXPORT jint JNICALL Java_MyPackage_HeapMonitorFrequentTest_checkFrequentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - if (checkAnd(env, frames, 0, 0, 1)) { + if (check_and(env, frames, 0, 0, 1)) { return PASSED; } return FAILED; @@ -688,6 +688,27 @@ return diff_ratio * 100 > 
percent_error; } +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatRateTest_enableSampling(JNIEnv *env, jclass cls, jint rate) { + enable_sampling_with_rate(rate); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatRateTest_disableSampling(JNIEnv *env, jclass cls) { + disable_sampling(); +} + +JNIEXPORT jdouble JNICALL +Java_MyPackage_HeapMonitorStatRateTest_getAverageRate(JNIEnv *env, jclass cls) { + jvmtiHeapSamplingStats stats; + check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), + "Heap Sampling Statistics"); + fprintf(stderr, "Here we are : %d - %d\n", + (int) stats.sample_rate_accumulation, (int) stats.sample_rate_count); + + return ((double) stats.sample_rate_accumulation) / stats.sample_rate_count; +} + #ifdef __cplusplus } #endif