--- old/make/test/JtregNative.gmk 2017-06-27 14:19:04.236539685 -0700 +++ new/make/test/JtregNative.gmk 2017-06-27 14:19:04.128540067 -0700 @@ -61,6 +61,7 @@ $(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \ $(HOTSPOT_TOPDIR)/test/compiler/calls \ $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \ + $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/HeapMonitor \ $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleReads \ $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleExportsAndOpens \ $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/AddModuleUsesAndProvides \ --- old/src/share/vm/gc/g1/g1CollectedHeap.cpp 2017-06-27 14:19:04.616538344 -0700 +++ new/src/share/vm/gc/g1/g1CollectedHeap.cpp 2017-06-27 14:19:04.484538810 -0700 @@ -76,6 +76,7 @@ #include "oops/oop.inline.hpp" #include "prims/resolvedMethodTable.hpp" #include "runtime/atomic.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/init.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/vmThread.hpp" @@ -4307,6 +4308,13 @@ g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0); } +void G1CollectedHeap::process_heap_monitoring() { + log_develop_trace(gc, ref)("HeapSampling [other] : heap monitoring processing"); + G1STWIsAliveClosure is_alive(this); + G1KeepAliveClosure keep_alive(this); + HeapMonitoring::weak_oops_do(&is_alive, &keep_alive); +} + void G1CollectedHeap::preserve_cm_referents(G1ParScanThreadStateSet* per_thread_states) { // Any reference objects, in the collection set, that were 'discovered' // by the CM ref processor should have already been copied (either by @@ -4525,6 +4533,7 @@ } else { ref_processor_stw()->verify_no_references_recorded(); process_weak_jni_handles(); + process_heap_monitoring(); } if (G1StringDedup::is_enabled()) { --- old/src/share/vm/gc/g1/g1CollectedHeap.hpp 2017-06-27 14:19:05.056536791 -0700 +++ new/src/share/vm/gc/g1/g1CollectedHeap.hpp 2017-06-27 14:19:04.924537257 -0700 @@ -303,6 +303,7 @@ void trace_heap(GCWhen::Type 
when, const GCTracer* tracer); + void process_heap_monitoring(); void process_weak_jni_handles(); // These are macros so that, if the assert fires, we get the correct --- old/src/share/vm/gc/g1/g1MarkSweep.cpp 2017-06-27 14:19:05.372535675 -0700 +++ new/src/share/vm/gc/g1/g1MarkSweep.cpp 2017-06-27 14:19:05.280535999 -0700 @@ -48,6 +48,7 @@ #include "runtime/atomic.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/fprofiler.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/synchronizer.hpp" #include "runtime/thread.hpp" #include "runtime/vmThread.hpp" @@ -250,6 +251,7 @@ // Now adjust pointers in remaining weak roots. (All of which should // have been cleared if they pointed to non-surviving objects.) JNIHandles::weak_oops_do(&GenMarkSweep::adjust_pointer_closure); + HeapMonitoring::weak_oops_do(&GenMarkSweep::adjust_pointer_closure); if (G1StringDedup::is_enabled()) { G1StringDedup::oops_do(&GenMarkSweep::adjust_pointer_closure); --- old/src/share/vm/gc/parallel/psMarkSweep.cpp 2017-06-27 14:19:05.676534601 -0700 +++ new/src/share/vm/gc/parallel/psMarkSweep.cpp 2017-06-27 14:19:05.584534927 -0700 @@ -51,6 +51,7 @@ #include "oops/oop.inline.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/fprofiler.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/safepoint.hpp" #include "runtime/vmThread.hpp" #include "services/management.hpp" @@ -610,6 +611,7 @@ // have been cleared if they pointed to non-surviving objects.) 
// Global (weak) JNI handles JNIHandles::weak_oops_do(adjust_pointer_closure()); + HeapMonitoring::weak_oops_do(adjust_pointer_closure()); CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); --- old/src/share/vm/gc/parallel/psParallelCompact.cpp 2017-06-27 14:19:06.004533444 -0700 +++ new/src/share/vm/gc/parallel/psParallelCompact.cpp 2017-06-27 14:19:05.904533796 -0700 @@ -61,6 +61,7 @@ #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/fprofiler.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/safepoint.hpp" #include "runtime/vmThread.hpp" #include "services/management.hpp" @@ -2171,6 +2172,7 @@ // have been cleared if they pointed to non-surviving objects.) // Global (weak) JNI handles JNIHandles::weak_oops_do(&oop_closure); + HeapMonitoring::weak_oops_do(&oop_closure); CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); --- old/src/share/vm/gc/shared/collectedHeap.cpp 2017-06-27 14:19:06.364532173 -0700 +++ new/src/share/vm/gc/shared/collectedHeap.cpp 2017-06-27 14:19:06.240532610 -0700 @@ -38,6 +38,7 @@ #include "memory/resourceArea.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/init.hpp" #include "runtime/thread.inline.hpp" #include "services/heapDumper.hpp" @@ -295,7 +296,40 @@ } #endif +HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) { + // We can come here for three reasons: + // - We either really did fill the tlab. + // - We pretended to everyone we did and we want to sample. + // - Both of the above reasons are true at the same time. + if (HeapMonitoring::enabled()) { + if (thread->tlab().should_sample()) { + // If we don't have an object yet, try to allocate it. 
+ if (obj == NULL) { + // The tlab could still have space after this sample. + thread->tlab().set_back_actual_end(); + obj = thread->tlab().allocate(size); + } + + // Is the object allocated now? + if (obj != NULL) { + // Object is allocated, sample it now. + HeapMonitoring::object_alloc_do_sample(thread, + reinterpret_cast(obj), + size); + } + } + } + + thread->tlab().pick_next_sample(); + return obj; +} + HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) { + HeapWord* obj = handle_heap_sampling(thread, NULL, size); + + if (obj != NULL) { + return obj; + } // Retain tlab and allocate object in shared space if // the amount free in the tlab is too large to discard. @@ -315,7 +349,7 @@ } // Allocate a new TLAB... - HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size); + obj = Universe::heap()->allocate_new_tlab(new_tlab_size); if (obj == NULL) { return NULL; } @@ -336,6 +370,7 @@ #endif // ASSERT } thread->tlab().fill(obj, obj + size, new_tlab_size); + handle_heap_sampling(thread, obj, size); return obj; } --- old/src/share/vm/gc/shared/collectedHeap.hpp 2017-06-27 14:19:06.672531086 -0700 +++ new/src/share/vm/gc/shared/collectedHeap.hpp 2017-06-27 14:19:06.576531425 -0700 @@ -143,6 +143,9 @@ inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size); static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size); + // Handle if needed heap sampling. + static HeapWord* handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size); + // Allocate an uninitialized block of the given size, or returns NULL if // this is impossible. 
inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS); --- old/src/share/vm/gc/shared/collectedHeap.inline.hpp 2017-06-27 14:19:07.048529758 -0700 +++ new/src/share/vm/gc/shared/collectedHeap.inline.hpp 2017-06-27 14:19:06.912530238 -0700 @@ -157,6 +157,7 @@ AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize); + THREAD->tlab().handle_sample(THREAD, result, size); return result; } --- old/src/share/vm/gc/shared/genCollectedHeap.cpp 2017-06-27 14:19:07.432528403 -0700 +++ new/src/share/vm/gc/shared/genCollectedHeap.cpp 2017-06-27 14:19:07.300528868 -0700 @@ -49,6 +49,7 @@ #include "runtime/fprofiler.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/java.hpp" #include "runtime/vmThread.hpp" #include "services/management.hpp" @@ -724,6 +725,7 @@ void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) { JNIHandles::weak_oops_do(root_closure); + HeapMonitoring::weak_oops_do(root_closure); _young_gen->ref_processor()->weak_oops_do(root_closure); _old_gen->ref_processor()->weak_oops_do(root_closure); } --- old/src/share/vm/gc/shared/referenceProcessor.cpp 2017-06-27 14:19:07.772527201 -0700 +++ new/src/share/vm/gc/shared/referenceProcessor.cpp 2017-06-27 14:19:07.664527583 -0700 @@ -35,6 +35,7 @@ #include "memory/allocation.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/java.hpp" #include "runtime/jniHandles.hpp" @@ -257,9 +258,16 @@ process_phaseJNI(is_alive, keep_alive, complete_gc); } + size_t handled; + { + GCTraceTime(Debug, gc, ref) tt("Heap Monitoring Weak Reference", gc_timer); + handled = process_phaseHeapSampling(is_alive, keep_alive, complete_gc, task_executor); + } + log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT, stats.soft_count(), stats.weak_count(), 
stats.final_count(), stats.phantom_count()); log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs()); + log_develop_trace(gc, ref)("Heap Sampler Weak Reference handled: " SIZE_FORMAT, handled); return stats; } @@ -289,6 +297,22 @@ complete_gc->do_void(); } +size_t ReferenceProcessor::process_phaseHeapSampling( + BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor) { + size_t count = 0; + if (HeapMonitoring::enabled()) { + if (task_executor != NULL) { + task_executor->set_single_threaded_mode(); + } + count = HeapMonitoring::weak_oops_do(is_alive, keep_alive); + complete_gc->do_void(); + } + return count; +} + void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) { // Enqueue references that are not made active again, and // clear the decks for the next collection (cycle). --- old/src/share/vm/gc/shared/referenceProcessor.hpp 2017-06-27 14:19:08.156525846 -0700 +++ new/src/share/vm/gc/shared/referenceProcessor.hpp 2017-06-27 14:19:08.024526312 -0700 @@ -249,6 +249,11 @@ OopClosure* keep_alive, VoidClosure* complete_gc); + size_t process_phaseHeapSampling(BoolObjectClosure* is_alive, + OopClosure* keep_alive, + VoidClosure* complete_gc, + AbstractRefProcTaskExecutor* task_executor); + // Work methods used by the method process_discovered_reflist // Phase1: keep alive all those referents that are otherwise // dead but which must be kept alive by policy (and their closure). 
--- old/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp 2017-06-27 14:19:08.524524547 -0700 +++ new/src/share/vm/gc/shared/threadLocalAllocBuffer.cpp 2017-06-27 14:19:08.396524998 -0700 @@ -29,6 +29,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" @@ -121,6 +122,8 @@ set_top(NULL); set_pf_top(NULL); set_end(NULL); + set_actual_end(NULL); + set_slow_path_end(NULL); } } assert(!(retire || ZeroTLAB) || @@ -183,7 +186,10 @@ set_top(top); set_pf_top(top); set_end(end); + set_actual_end(end); + set_slow_path_end(end); invariants(); + _bytes_until_sample = 0; } void ThreadLocalAllocBuffer::initialize() { @@ -306,13 +312,74 @@ guarantee(p == top(), "end of last object must match end of space"); } +void ThreadLocalAllocBuffer::pick_next_sample() { + if (!HeapMonitoring::enabled()) { + return; + } + + if (bytes_until_sample() == 0) { + HeapMonitoring::pick_next_sample(bytes_until_sample_addr()); + } + + // Finally, fix up the sampling bytes left and _end. + size_t heap_words_remaining = _end - _top; + size_t bytes_left = bytes_until_sample(); + size_t words_until_sample = bytes_left / HeapWordSize; + + if (heap_words_remaining > words_until_sample) { + set_end(_top + words_until_sample); + set_bytes_until_sample(0); + } else { + bytes_left -= heap_words_remaining * HeapWordSize; + set_bytes_until_sample(bytes_left); + } +} + Thread* ThreadLocalAllocBuffer::myThread() { return (Thread*)(((char *)this) + in_bytes(start_offset()) - in_bytes(Thread::tlab_start_offset())); } +void ThreadLocalAllocBuffer::set_back_actual_end() { + // Did a fast TLAB refill occur? + if (_slow_path_end != _end) { + // Fix up the actual end to be now the end of this TLAB. 
+ _slow_path_end = _end; + _actual_end = _end; + } else { + _end = _actual_end; + } +} + +void ThreadLocalAllocBuffer::handle_sample(Thread* thread, HeapWord* result, + size_t size) { + if (!HeapMonitoring::enabled()) { + return; + } + + set_bytes_until_sample(bytes_until_sample() - size); + // Should we sample now? + set_back_actual_end(); + if (should_sample()) { + HeapMonitoring::object_alloc_do_sample(thread, + reinterpret_cast(result), + size); + } + pick_next_sample(); +} + +HeapWord* ThreadLocalAllocBuffer::hard_end() { + // Did a fast TLAB refill occur? + if (_slow_path_end != _end) { + // Fix up the actual end to be now the end of this TLAB. + _slow_path_end = _end; + _actual_end = _end; + } + + return _actual_end + alignment_reserve(); +} GlobalTLABStats::GlobalTLABStats() : _allocating_threads_avg(TLABAllocationWeight) { --- old/src/share/vm/gc/shared/threadLocalAllocBuffer.hpp 2017-06-27 14:19:08.812523530 -0700 +++ new/src/share/vm/gc/shared/threadLocalAllocBuffer.hpp 2017-06-27 14:19:08.720523855 -0700 @@ -37,6 +37,13 @@ // It is thread-private at any time, but maybe multiplexed over // time across multiple threads. The park()/unpark() pair is // used to make it available for such multiplexing. +// +// Heap sampling is performed via the end/actual_end fields. +// actual_end contains the real end of the tlab allocation, +// whereas end can be set to an arbitrary spot in the tlab to +// trip the return and sample the allocation. +// slow_path_end is used to track if a fast tlab refill occurred +// between slowpath calls. 
class ThreadLocalAllocBuffer: public CHeapObj { friend class VMStructs; friend class JVMCIVMStructs; @@ -44,10 +51,15 @@ HeapWord* _start; // address of TLAB HeapWord* _top; // address after last allocation HeapWord* _pf_top; // allocation prefetch watermark - HeapWord* _end; // allocation end (excluding alignment_reserve) + HeapWord* _end; // allocation end (can be the sampling end point or + // the actual TLAB end, excluding alignment_reserve) + HeapWord* _actual_end; // allocation actual_end (actual TLAB end, excluding alignment_reserve) + HeapWord* _slow_path_end; // remember the end in case a fast refill occurs. + size_t _desired_size; // desired size (including alignment_reserve) size_t _refill_waste_limit; // hold onto tlab if free() is larger than this size_t _allocated_before_last_gc; // total bytes allocated up until the last gc + size_t _bytes_until_sample; // bytes until sample. static size_t _max_size; // maximum size of any TLAB static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB @@ -66,17 +78,20 @@ void set_start(HeapWord* start) { _start = start; } void set_end(HeapWord* end) { _end = end; } + void set_actual_end(HeapWord* actual_end) { _actual_end = actual_end; } + void set_slow_path_end(HeapWord* slow_path_end) { _slow_path_end = slow_path_end; } void set_top(HeapWord* top) { _top = top; } void set_pf_top(HeapWord* pf_top) { _pf_top = pf_top; } void set_desired_size(size_t desired_size) { _desired_size = desired_size; } void set_refill_waste_limit(size_t waste) { _refill_waste_limit = waste; } + void set_bytes_until_sample(size_t bytes) { _bytes_until_sample = bytes; } size_t initial_refill_waste_limit() { return desired_size() / TLABRefillWasteFraction; } static int target_refills() { return _target_refills; } size_t initial_desired_size(); - size_t remaining() const { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } + size_t remaining() { return end() == NULL ? 
0 : pointer_delta(hard_end(), top()); } // Make parsable and release it. void reset(); @@ -115,7 +130,7 @@ HeapWord* start() const { return _start; } HeapWord* end() const { return _end; } - HeapWord* hard_end() const { return _end + alignment_reserve(); } + HeapWord* hard_end(); HeapWord* top() const { return _top; } HeapWord* pf_top() const { return _pf_top; } size_t desired_size() const { return _desired_size; } @@ -162,11 +177,19 @@ void fill(HeapWord* start, HeapWord* top, size_t new_size); void initialize(); + void pick_next_sample(); + void set_back_actual_end(); + void handle_sample(Thread* thread, HeapWord* result, size_t size); + size_t bytes_until_sample() { return _bytes_until_sample; } + size_t *bytes_until_sample_addr() { return &_bytes_until_sample; } + bool should_sample() { return bytes_until_sample() == 0; } + static size_t refill_waste_limit_increment() { return TLABWasteIncrement; } // Code generation support static ByteSize start_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _start); } static ByteSize end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _end ); } + static ByteSize actual_end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _actual_end ); } static ByteSize top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _top ); } static ByteSize pf_top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top ); } static ByteSize size_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); } --- old/src/share/vm/prims/jvmti.xml 2017-06-27 14:19:09.184522216 -0700 +++ new/src/share/vm/prims/jvmti.xml 2017-06-27 14:19:09.044522711 -0700 @@ -10350,6 +10350,13 @@ See . + + + + Can sample the heap. + If this capability is enabled then the heap sampling methods can be called. + + @@ -11528,6 +11535,266 @@ + + + + + BCI for the given allocation. + + + + Method ID for the given frame. + + + + + + JNIEnv + Environment where the trace was recorded. 
+ + + + jvmtiCallFrame + + Pointer to the call frames. + + + + The number of frames for the trace. + + + + The size of the object allocation. + + + + The thread id number. + + + + + + + jvmtiStackTrace + + + The array with the various stack traces. + + + + + + + Number of traces pointed by the array . + + + + + + + + + The number of sampled allocations during the lifetime of the sampler. + For very long sampling, this number can overflow. + + + + + + + The number of samples already garbage collected. + For very long sampling, this number can overflow. + + + + + + + Accumulation of the sample rates chosen. + For very long sampling, this number can overflow. + + + + + + + The number of sample rates chosen. + For very long sampling, this number can overflow. + + + + + + + Accumulation of stack depths collected by the sampler. + For very long sampling, this number can overflow. + + + + + + Start Heap Sampling + + Start the heap sampler in the JVM. The function provides, via its argument, the sampling + rate requested and will fill internal data structures with heap allocation samples. The + samples are obtained via the , + , , + functions. + + new + + + + + + + + The monitoring rate used for sampling. The sampler will use a statistical approach to + provide in average sampling every allocated bytes. + + + + + + The maximum storage used for the sampler. By default, the value is 200. + + + + + + is less than zero. + + + + + + Start Heap Sampling + + Stop the heap sampler in the JVM. + Any sample obtained during sampling is still available via the , + , , + functions. + + new + + + + + + + + + + + Get Live Traces + + Get Live Heap Sampled traces. The fields of the + structure are filled in with details of the specified sampled allocation. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Get Garbage Traces + + Get the recent garbage heap sampled traces. 
The fields of the + structure are filled in with details of the specified sampled allocation. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Get Frequent Garbage Traces + + Get the frequent garbage heap sampled traces. The fields of the + structure are filled in with details of the specified sampled allocation. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Release traces provided by the heap monitoring + + Release traces provided by any of the trace retrieval methods. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be released. + + + + + + + + + Get the heap sampling statistics + + Returns a to understand the heap sampling behavior and current + internal data storage status. + + new + + + + + + jvmtiHeapSamplingStats + + The structure to be filled with the heap sampler's statistics. + + + + + + + + --- old/src/share/vm/prims/jvmtiEnv.cpp 2017-06-27 14:19:09.736520268 -0700 +++ new/src/share/vm/prims/jvmtiEnv.cpp 2017-06-27 14:19:09.620520677 -0700 @@ -46,6 +46,7 @@ #include "prims/jvmtiCodeBlobEvents.hpp" #include "prims/jvmtiExtensions.hpp" #include "prims/jvmtiGetLoadedClasses.hpp" +#include "prims/jvmtiHeapTransition.hpp" #include "prims/jvmtiImpl.hpp" #include "prims/jvmtiManageCapabilities.hpp" #include "prims/jvmtiRawMonitor.hpp" @@ -55,6 +56,7 @@ #include "prims/jvmtiUtil.hpp" #include "runtime/arguments.hpp" #include "runtime/deoptimization.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" @@ -1947,6 +1949,81 @@ return JVMTI_ERROR_NONE; } /* end IterateOverInstancesOfClass */ +// Start the sampler. 
+jvmtiError +JvmtiEnv::StartHeapSampling(jint monitoring_rate, jint max_storage) { + if (monitoring_rate < 0) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapThreadTransition htt(Thread::current()); + HeapMonitoring::initialize_profiling(monitoring_rate, max_storage); + return JVMTI_ERROR_NONE; +} /* end StartHeapSampling */ + +// Stop the sampler. +jvmtiError +JvmtiEnv::StopHeapSampling() { + HeapThreadTransition htt(Thread::current()); + HeapMonitoring::stop_profiling(); + return JVMTI_ERROR_NONE; +} /* end StopHeapSampling */ + +// Get the currently live sampled allocations. +jvmtiError +JvmtiEnv::GetLiveTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_live_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetLiveTraces */ + +// Get the recently garbage collected sampled allocations. +jvmtiError +JvmtiEnv::GetGarbageTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_garbage_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetGarbageTraces */ + +// Get the frequently garbage collected sampled allocations. +jvmtiError +JvmtiEnv::GetFrequentGarbageTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_frequent_garbage_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetFrequentGarbageTraces */ + +// Release sampled traces. +jvmtiError +JvmtiEnv::ReleaseTraces(jvmtiStackTraces* stack_traces) { + if (stack_traces == NULL) { + return JVMTI_ERROR_NONE; + } + HeapMonitoring::release_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end ReleaseTraces */ + +// Get the heap sampling statistics. 
+jvmtiError +JvmtiEnv::GetHeapSamplingStats(jvmtiHeapSamplingStats* stats) { + if (stats == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + HeapMonitoring::get_sampling_statistics(stats); + return JVMTI_ERROR_NONE; +} /* end GetHeapSamplingStats */ // // Local Variable functions --- old/src/share/vm/prims/jvmtiManageCapabilities.cpp 2017-06-27 14:19:10.068519095 -0700 +++ new/src/share/vm/prims/jvmtiManageCapabilities.cpp 2017-06-27 14:19:09.976519421 -0700 @@ -112,6 +112,7 @@ jc.can_generate_object_free_events = 1; jc.can_generate_resource_exhaustion_heap_events = 1; jc.can_generate_resource_exhaustion_threads_events = 1; + jc.can_sample_heap = 1; return jc; } --- old/src/share/vm/runtime/thread.hpp 2017-06-27 14:19:10.424517839 -0700 +++ new/src/share/vm/runtime/thread.hpp 2017-06-27 14:19:10.320518206 -0700 @@ -615,6 +615,7 @@ TLAB_FIELD_OFFSET(start) TLAB_FIELD_OFFSET(end) + TLAB_FIELD_OFFSET(actual_end) TLAB_FIELD_OFFSET(top) TLAB_FIELD_OFFSET(pf_top) TLAB_FIELD_OFFSET(size) // desired_size --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/src/share/vm/prims/jvmtiHeapTransition.hpp 2017-06-27 14:19:10.692516892 -0700 @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP +#define SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP + +// A RAII class that handles transitions from the agent into the VM. +class HeapThreadTransition : StackObj { + private: + JavaThreadState _saved_state; + JavaThread *_jthread; + + public: + // Transitions this thread from the agent (thread_in_native) to the VM. + HeapThreadTransition(Thread *thread) { + if (thread->is_Java_thread()) { + _jthread = static_cast(thread); + _saved_state = _jthread->thread_state(); + if (_saved_state == _thread_in_native) { + ThreadStateTransition::transition_from_native(_jthread, _thread_in_vm); + } else { + ThreadStateTransition::transition(_jthread, + _saved_state, + _thread_in_vm); + } + } else { + _jthread = NULL; + _saved_state = _thread_new; + } + } + + // Transitions this thread back to the agent from the VM. + ~HeapThreadTransition() { + if (_jthread != NULL) { + ThreadStateTransition::transition(_jthread, _thread_in_vm, _saved_state); + } + } +}; + +#endif // SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/src/share/vm/runtime/heapMonitoring.cpp 2017-06-27 14:19:10.952515975 -0700 @@ -0,0 +1,727 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "prims/forte.hpp" +#include "runtime/heapMonitoring.hpp" + + +static const int MaxStackDepth = 64; + +// Internal data structure representing traces. +struct StackTraceData : CHeapObj { + jvmtiStackTrace *trace; + oop obj; + int references; + + StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {} + + StackTraceData() : trace(NULL), obj(NULL), references(0) {} + + // StackTraceDatas are shared around the board between various lists. So + // handle this by hand instead of having this in the destructor. There are + // cases where the struct is on the stack but holding heap data not to be + // freed. + static void free_data(StackTraceData *data) { + if (data->trace != NULL) { + FREE_C_HEAP_ARRAY(jvmtiCallFrame, data->trace->frames); + FREE_C_HEAP_OBJ(data->trace); + } + delete data; + } +}; + +// Fixed size buffer for holding garbage traces. 
+class GarbageTracesBuffer : public CHeapObj { + public: + GarbageTracesBuffer(uint32_t size) : _size(size) { + _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*, + size, + mtInternal); + memset(_garbage_traces, 0, sizeof(StackTraceData*) * size); + } + + virtual ~GarbageTracesBuffer() { + FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces); + } + + StackTraceData** get_traces() const { + return _garbage_traces; + } + + bool store_trace(StackTraceData *trace) { + uint32_t index; + if (!select_replacement(&index)) { + return false; + } + + StackTraceData *old_data = _garbage_traces[index]; + + if (old_data != NULL) { + old_data->references--; + + if (old_data->references == 0) { + StackTraceData::free_data(old_data); + } + } + + trace->references++; + _garbage_traces[index] = trace; + return true; + } + + uint32_t size() const { + return _size; + } + + protected: + // Subclasses select the trace to replace. Returns false if no replacement + // is to happen, otherwise stores the index of the trace to replace in + // *index. + virtual bool select_replacement(uint32_t *index) = 0; + + const uint32_t _size; + + private: + // The current garbage traces. A fixed-size ring buffer. + StackTraceData **_garbage_traces; +}; + +// Keep statistical sample of traces over the lifetime of the server. +// When the buffer is full, replace a random entry with probability +// 1/samples_seen. This strategy tends towards preserving the most frequently +// occurring traces over time. 
+class FrequentGarbageTraces : public GarbageTracesBuffer { + public: + FrequentGarbageTraces(int size) + : GarbageTracesBuffer(size), + _garbage_traces_pos(0), + _samples_seen(0) { + } + + virtual ~FrequentGarbageTraces() { + } + + virtual bool select_replacement(uint32_t* index) { + ++_samples_seen; + + if (_garbage_traces_pos < _size) { + *index = _garbage_traces_pos++; + return true; + } + + uint64_t random_uint64 = + (static_cast(::random()) << 32) | ::random(); + + uint32_t random_index = random_uint64 % _samples_seen; + if (random_index < _size) { + *index = random_index; + return true; + } + + return false; + } + + private: + // The current position in the buffer as we initially fill it. + uint32_t _garbage_traces_pos; + + uint64_t _samples_seen; +}; + +// Store most recent garbage traces. +class MostRecentGarbageTraces : public GarbageTracesBuffer { + public: + MostRecentGarbageTraces(int size) + : GarbageTracesBuffer(size), + _garbage_traces_pos(0) { + } + + virtual ~MostRecentGarbageTraces() { + } + + virtual bool select_replacement(uint32_t* index) { + *index = _garbage_traces_pos; + + _garbage_traces_pos = + (_garbage_traces_pos + 1) % _size; + + return true; + } + + private: + // The current position in the buffer. + uint32_t _garbage_traces_pos; +}; + +// Each object that we profile is stored as trace with the thread_id. +class StackTraceStorage : public CHeapObj { + public: + // The function that gets called to add a trace to the list of + // traces we are maintaining. + void add_trace(jvmtiStackTrace *trace, oop o); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_all_stack_traces(jvmtiStackTraces *traces); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. 
+ void get_garbage_stack_traces(jvmtiStackTraces *traces); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces); + + // Executes whenever weak references are traversed. is_alive tells + // you if the given oop is still reachable and live. + size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); + + ~StackTraceStorage(); + StackTraceStorage(); + + static StackTraceStorage* storage() { + if (internal_storage == NULL) { + internal_storage = new StackTraceStorage(); + } + return internal_storage; + } + + static void reset_stack_trace_storage() { + delete internal_storage, internal_storage = NULL; + } + + bool is_initialized() { + return _initialized; + } + + const jvmtiHeapSamplingStats& get_heap_sampling_stats() const { + return _stats; + } + + // Static method to set the storage in place at initialization. + static void initialize_stack_trace_storage(int max_storage) { + reset_stack_trace_storage(); + StackTraceStorage *storage = StackTraceStorage::storage(); + storage->initialize_storage(max_storage); + } + + void accumulate_sample_rate(size_t rate) { + _stats.sample_rate_accumulation += rate; + _stats.sample_rate_count++; + } + + bool initialized() { return _initialized; } + volatile bool *initialized_address() { return &_initialized; } + + private: + // The traces currently sampled. + GrowableArray *_allocated_traces; + + // Recent garbage traces. + MostRecentGarbageTraces *_recent_garbage_traces; + + // Frequent garbage traces. + FrequentGarbageTraces *_frequent_garbage_traces; + + // Heap Sampling statistics. + jvmtiHeapSamplingStats _stats; + + // Maximum amount of storage provided by the JVMTI call initialize_profiling. 
+ int _max_storage; + + static StackTraceStorage* internal_storage; + volatile bool _initialized; + + // Support functions and classes for copying data to the external + // world. + class StackTraceDataCopier { + public: + virtual int size() const = 0; + virtual const StackTraceData *get(uint32_t i) const = 0; + }; + + class LiveStackTraceDataCopier : public StackTraceDataCopier { + public: + LiveStackTraceDataCopier(GrowableArray *data) : + _data(data) {} + int size() const { return _data ? _data->length() : 0; } + const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); } + + private: + GrowableArray *_data; + }; + + class GarbageStackTraceDataCopier : public StackTraceDataCopier { + public: + GarbageStackTraceDataCopier(StackTraceData **data, int size) : + _data(data), _size(size) {} + int size() const { return _size; } + const StackTraceData *get(uint32_t i) const { return _data[i]; } + + private: + StackTraceData **_data; + int _size; + }; + + // Instance initialization. + void initialize_storage(int max_storage); + + // Copies from StackTraceData to jvmtiStackTrace. + bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from); + + // Creates a deep copy of the list of StackTraceData. 
+ void copy_stack_traces(const StackTraceDataCopier &copier, + jvmtiStackTraces *traces); + + void store_garbage_trace(const StackTraceData &trace); + + void free_garbage(); +}; + +StackTraceStorage* StackTraceStorage::internal_storage; + +// Statics for Sampler +double HeapMonitoring::_log_table[1 << FastLogNumBits]; +bool HeapMonitoring::_enabled; +AlwaysTrueClosure HeapMonitoring::_always_true; +jint HeapMonitoring::_monitoring_rate; + +// Cheap random number generator +uint64_t HeapMonitoring::_rnd; + +StackTraceStorage::StackTraceStorage() : + _allocated_traces(NULL), + _recent_garbage_traces(NULL), + _frequent_garbage_traces(NULL), + _max_storage(0), + _initialized(false) { + memset(&_stats, 0, sizeof(_stats)); +} + +void StackTraceStorage::free_garbage() { + StackTraceData **recent_garbage = NULL; + uint32_t recent_size = 0; + + StackTraceData **frequent_garbage = NULL; + uint32_t frequent_size = 0; + + if (_recent_garbage_traces != NULL) { + recent_garbage = _recent_garbage_traces->get_traces(); + recent_size = _recent_garbage_traces->size(); + } + + if (_frequent_garbage_traces != NULL) { + frequent_garbage = _frequent_garbage_traces->get_traces(); + frequent_size = _frequent_garbage_traces->size(); + } + + // Simple solution since this happens at exit. + // Go through the recent and remove any that only are referenced there. + for (uint32_t i = 0; i < recent_size; i++) { + StackTraceData *trace = recent_garbage[i]; + if (trace != NULL) { + trace->references--; + + if (trace->references == 0) { + StackTraceData::free_data(trace); + } + } + } + + // Then go through the frequent and remove those that are now only there. 
+ for (uint32_t i = 0; i < frequent_size; i++) { + StackTraceData *trace = frequent_garbage[i]; + if (trace != NULL) { + trace->references--; + + if (trace->references == 0) { + StackTraceData::free_data(trace); + } + } + } +} + +StackTraceStorage::~StackTraceStorage() { + delete _allocated_traces; + + free_garbage(); + delete _recent_garbage_traces; + delete _frequent_garbage_traces; + _initialized = false; +} + +void StackTraceStorage::initialize_storage(int max_storage) { + // In case multiple threads got locked and then 1 by 1 got through. + if (_initialized) { + return; + } + + _allocated_traces = new (ResourceObj::C_HEAP, mtInternal) + GrowableArray(128, true); + + _recent_garbage_traces = new MostRecentGarbageTraces(max_storage); + _frequent_garbage_traces = new FrequentGarbageTraces(max_storage); + + _max_storage = max_storage; + _initialized = true; +} + +void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) { + StackTraceData new_data(trace, o); + _stats.sample_count++; + _stats.stack_depth_accumulation += trace->frame_count; + _allocated_traces->append(new_data); +} + +size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, + OopClosure *f) { + size_t count = 0; + if (is_initialized()) { + int len = _allocated_traces->length(); + + // Compact the oop traces. Moves the live oops to the beginning of the + // growable array, potentially overwriting the dead ones. + int curr_pos = 0; + for (int i = 0; i < len; i++) { + StackTraceData &trace = _allocated_traces->at(i); + oop value = trace.obj; + if ((value != NULL && Universe::heap()->is_in_reserved(value)) && + is_alive->do_object_b(value)) { + // Update the oop to point to the new object if it is still alive. + f->do_oop(&(trace.obj)); + + // Copy the old trace, if it is still live. + _allocated_traces->at_put(curr_pos++, trace); + + count++; + } else { + // If the old trace is no longer live, add it to the list of + // recently collected garbage. 
+ store_garbage_trace(trace); + } + } + + // Zero out remaining array elements. Even though the call to trunc_to + // below truncates these values, zeroing them out is good practice. + StackTraceData zero_trace; + for (int i = curr_pos; i < len; i++) { + _allocated_traces->at_put(i, zero_trace); + } + + // Set the array's length to the number of live elements. + _allocated_traces->trunc_to(curr_pos); + } + + return count; +} + +bool StackTraceStorage::deep_copy(jvmtiStackTrace *to, + const StackTraceData *from) { + const jvmtiStackTrace *src = from->trace; + *to = *src; + + to->frames = + NEW_C_HEAP_ARRAY(jvmtiCallFrame, MaxStackDepth, mtInternal); + + if (to->frames == NULL) { + return false; + } + + memcpy(to->frames, + src->frames, + sizeof(jvmtiCallFrame) * MaxStackDepth); + return true; +} + +// Called by the outside world; returns a copy of the stack traces +// (because we could be replacing them as the user handles them). +// The array is secretly null-terminated (to make it easier to reclaim). +void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) { + LiveStackTraceDataCopier copier(_allocated_traces); + copy_stack_traces(copier, traces); +} + +// See comment on get_all_stack_traces +void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) { + GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(), + _recent_garbage_traces->size()); + copy_stack_traces(copier, traces); +} + +// See comment on get_all_stack_traces +void StackTraceStorage::get_frequent_garbage_stack_traces( + jvmtiStackTraces *traces) { + GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(), + _frequent_garbage_traces->size()); + copy_stack_traces(copier, traces); +} + + +void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, + jvmtiStackTraces *traces) { + int len = copier.size(); + + // Create a new array to store the StackTraceData objects. + // + 1 for a NULL at the end. 
+ jvmtiStackTrace *t = + NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal); + if (t == NULL) { + traces->stack_traces = NULL; + traces->trace_count = 0; + return; + } + // +1 to have a NULL at the end of the array. + memset(t, 0, (len + 1) * sizeof(*t)); + + // Copy the StackTraceData objects into the new array. + int trace_count = 0; + for (int i = 0; i < len; i++) { + const StackTraceData *stack_trace = copier.get(i); + if (stack_trace != NULL) { + jvmtiStackTrace *to = &t[trace_count]; + if (!deep_copy(to, stack_trace)) { + continue; + } + trace_count++; + } + } + + traces->stack_traces = t; + traces->trace_count = trace_count; +} + +void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) { + StackTraceData *new_trace = new StackTraceData(); + *new_trace = trace; + + bool accepted = _recent_garbage_traces->store_trace(new_trace); + + // Accepted is on the right of the boolean to force the store_trace to happen. + accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted; + + if (!accepted) { + // No one wanted to use it. + delete new_trace; + } + + _stats.garbage_collected_samples++; +} + +// Delegate the initialization question to the underlying storage system. +bool HeapMonitoring::initialized() { + return StackTraceStorage::storage()->initialized(); +} + +// Delegate the initialization question to the underlying storage system. 
+bool *HeapMonitoring::initialized_address() { + return + const_cast(StackTraceStorage::storage()->initialized_address()); +} + +void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) { + StackTraceStorage::storage()->get_all_stack_traces(traces); +} + +void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) { + const jvmtiHeapSamplingStats& internal_stats = + StackTraceStorage::storage()->get_heap_sampling_stats(); + *stats = internal_stats; +} + +void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) { + StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces); +} + +void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) { + StackTraceStorage::storage()->get_garbage_stack_traces(traces); +} + +void HeapMonitoring::release_traces(jvmtiStackTraces *traces) { + jint trace_count = traces->trace_count; + jvmtiStackTrace *stack_traces = traces->stack_traces; + + for (jint i = 0; i < trace_count; i++) { + jvmtiStackTrace *current_trace = stack_traces + i; + FREE_C_HEAP_ARRAY(jvmtiCallFrame, current_trace->frames); + } + + FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces); + traces->trace_count = 0; + traces->stack_traces = NULL; +} + +// Invoked by the GC to clean up old stack traces and remove old arrays +// of instrumentation that are still lying around. +size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, + OopClosure *f) { + assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); + return StackTraceStorage::storage()->weak_oops_do(is_alive, f); +} + +void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) { + // Ignore if already enabled. + if (_enabled) { + return; + } + + _monitoring_rate = monitoring_rate; + + // Initalize and reset. + StackTraceStorage::initialize_stack_trace_storage(max_storage); + + // Populate the lookup table for fast_log2. + // This approximates the log2 curve with a step function. 
+ // Steps have height equal to log2 of the mid-point of the step. + for (int i = 0; i < (1 << FastLogNumBits); i++) { + double half_way = static_cast(i + 0.5); + _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0)); + } + + JavaThread *t = static_cast(Thread::current()); + _rnd = static_cast(reinterpret_cast(t)); + if (_rnd == 0) { + _rnd = 1; + } + for (int i = 0; i < 20; i++) { + _rnd = next_random(_rnd); + } + + _enabled = true; +} + +void HeapMonitoring::stop_profiling() { + _enabled = false; +} + +// Generates a geometric variable with the specified mean (512K by default). +// This is done by generating a random number between 0 and 1 and applying +// the inverse cumulative distribution function for an exponential. +// Specifically: Let m be the inverse of the sample rate, then +// the probability distribution function is m*exp(-mx) so the CDF is +// p = 1 - exp(-mx), so +// q = 1 - p = exp(-mx) +// log_e(q) = -mx +// -log_e(q)/m = x +// log_2(q) * (-log_e(2) * 1/m) = x +// In the code, q is actually in the range 1 to 2**26, hence the -26 below +void HeapMonitoring::pick_next_sample(size_t *ptr) { + _rnd = next_random(_rnd); + // Take the top 26 bits as the random number + // (This plus a 1<<58 sampling bound gives a max possible step of + // 5194297183973780480 bytes. In this case, + // for sample_parameter = 1<<19, max possible step is + // 9448372 bytes (24 bits). + const uint64_t prng_mod_power = 48; // Number of bits in prng + // The uint32_t cast is to prevent a (hard-to-reproduce) NAN + // under piii debug for some binaries. + double q = static_cast(_rnd >> (prng_mod_power - 26)) + 1.0; + // Put the computed p-value through the CDF of a geometric. + // For faster performance (save ~1/20th exec time), replace + // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705) + // The value 26.000705 is used rather than 26 to compensate + // for inaccuracies in FastLog2 which otherwise result in a + // negative answer. 
+ double log_val = (fast_log2(q) - 26); + size_t rate = static_cast( + (0.0 < log_val ? 0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1); + *ptr = rate; + + StackTraceStorage::storage()->accumulate_sample_rate(rate); +} + +// Called from the interpreter and C1 +void HeapMonitoring::object_alloc_unsized(oopDesc* o) { + JavaThread *thread = static_cast(Thread::current()); + object_alloc_do_sample(thread, o, o->size() << LogHeapWordSize); +} + +void HeapMonitoring::object_alloc(oopDesc* o, intx byte_size) { + JavaThread *thread = static_cast(Thread::current()); + assert(o->size() << LogHeapWordSize == static_cast(byte_size), + "Object size is incorrect."); + object_alloc_do_sample(thread, o, byte_size); +} + +// Called directly by C2 +void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) { +#if defined(X86) || defined(PPC) + JavaThread *thread = static_cast(t); + if (StackTraceStorage::storage()->is_initialized()) { + assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); + JavaThread *thread = static_cast(t); + + jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal); + if (trace == NULL) { + return; + } + + jvmtiCallFrame *frames = + NEW_C_HEAP_ARRAY(jvmtiCallFrame, MaxStackDepth, mtInternal); + + if (frames == NULL) { + FREE_C_HEAP_OBJ(trace); + return; + } + + trace->frames = frames; + trace->env_id = (JavaThread::current())->jni_environment(); + trace->thread_id = SharedRuntime::get_java_tid(thread); + trace->size = byte_size; + trace->frame_count = 0; + + if (thread->has_last_Java_frame()) { // just to be safe + vframeStream vfst(thread, true); + int count = 0; + while (!vfst.at_end() && count < MaxStackDepth) { + Method* m = vfst.method(); + frames[count].bci = vfst.bci(); + frames[count].method_id = m->jmethod_id(); + count++; + + vfst.next(); + } + trace->frame_count = count; + } + + if (trace->frame_count> 0) { + // Success! 
+ StackTraceStorage::storage()->add_trace(trace, o); + return; + } + + // Failure! + FREE_C_HEAP_ARRAY(jvmtiCallFrame, trace->frames); + FREE_C_HEAP_OBJ(trace); + return; + } else { + // There is something like 64K worth of allocation before the VM + // initializes. This is just in the interests of not slowing down + // startup. + assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); + } +#else + Unimplemented(); +#endif +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/src/share/vm/runtime/heapMonitoring.hpp 2017-06-27 14:19:11.248514929 -0700 @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP +#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP + +#include "gc/shared/referenceProcessor.hpp" +#include "runtime/sharedRuntime.hpp" + +// Support class for sampling heap allocations across the VM. 
+class HeapMonitoring : AllStatic { + private: + // Cheap random number generator + static uint64_t _rnd; + static bool _initialized; + static jint _monitoring_rate; + static bool _enabled; + + // Statics for the fast log + static const int FastLogNumBits = 10; + static const int FastLogMask = (1 << FastLogNumBits) - 1; + static double _log_table[1<(0)) << prng_mod_power); + return (PrngMult * rnd + prng_add) & prng_mod_mask; + } + + static inline double fast_log2(const double & d) { + assert(d>0, "bad value passed to assert"); + uint64_t x = 0; + memcpy(&x, &d, sizeof(uint64_t)); + const uint32_t x_high = x >> 32; + const uint32_t y = x_high >> (20 - FastLogNumBits) & FastLogMask; + const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023; + return exponent + _log_table[y]; + } + + public: + static void pick_next_sample(size_t *ptr); + + static void get_live_traces(jvmtiStackTraces* stack_traces); + static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); + static void get_garbage_traces(jvmtiStackTraces* stack_traces); + static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces); + static void release_traces(jvmtiStackTraces *trace_info); + static void initialize_profiling(jint monitoring_rate, jint max_storage); + static void stop_profiling(); + static bool initialized(); + static bool *initialized_address(); + + // Called when o is allocated, called by interpreter and C1. + static void object_alloc_unsized(oopDesc* o); + static void object_alloc(oopDesc* o, intx byte_size); + + // Called when o is allocated from C2 directly, + // we know the thread, and we have done the sampling. + static void object_alloc_do_sample(Thread *t, oopDesc *o, intx size_in_bytes); + + // Called to clean up oops that have been saved by our sampling function, + // but which no longer have other references in the heap. 
+ static size_t weak_oops_do(BoolObjectClosure* is_alive, + OopClosure *f); + static size_t weak_oops_do(OopClosure* oop_closure) { + return weak_oops_do(&_always_true, oop_closure); + } + + static bool enabled() { + return _enabled; + } +}; + +#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/MyPackage/Frame.java 2017-06-27 14:19:11.584513743 -0700 @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +class Frame { + Frame(String method, String signature, String fileName, int lineNumber) { + this.method = method; + this.signature = signature; + this.fileName = fileName; + this.lineNumber = lineNumber; + } + + public String method; + public String signature; + public String fileName; + public int lineNumber; +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java 2017-06-27 14:19:11.876512712 -0700 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Checks the frequent garbage storage system. 
+ * @build Frame + * @compile HeapMonitorFrequentTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorFrequentTest + */ + +import java.io.PrintStream; + +public class HeapMonitorFrequentTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkFrequentFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void runner(int max) { + int sum = 0; + for (int j = 0; j < max; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 60); + frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 71); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 85); + + enableSampling(); + // We are testing for the recent garbage sampler: + // First run for 10000 iterations to fill up the garbage sampler. + runner(10000); + + // Now because we are in a different stack frame line here, we can just re-use the same runner. + // Run for 3, we really should not see that many of these and most should be the first type. + runner(5000); + + // Both types should exist in frequent since it was frequent enough. 
+ int status = checkFrequentFrames(frames); + if (status == 0) { + throw new RuntimeException("Old frames no longer exist"); + } + + // Change the last frame only since the rest is identical. + frames[2].lineNumber = 89; + + status = checkFrequentFrames(frames); + if (status == 0) { + throw new RuntimeException("New frames not in the frequent sampling list"); + } + } +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java 2017-06-27 14:19:12.240511428 -0700 @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor API + * @build Frame + * @compile HeapMonitorNoCapabilityTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorNoCapabilityTest + */ + +import java.io.PrintStream; + +public class HeapMonitorNoCapabilityTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int allSamplingMethodsFail(); + + public static void main(String[] args) { + + int result = allSamplingMethodsFail(); + + if (result == 0) { + throw new RuntimeException("Some methods could be called without a capability."); + } + } +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java 2017-06-27 14:19:12.532510397 -0700 @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies if turning on/off/on the monitor wipes out the information. + * @build Frame + * @compile HeapMonitorOnOffTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorOnOffTest + */ + +import java.io.PrintStream; + +public class HeapMonitorOnOffTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + native static int checkFrames(Frame[] frames); + native static int checkWipeOut(Frame[] frames); + native static int enableSampling(); + native static int disableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorOnOffTest.java", 53); + frames[1] = new Frame("wrapper", "()V", "HeapMonitorOnOffTest.java", 64); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorOnOffTest.java", 86); + + // Enable sampling and allocate. + enableSampling(); + wrapper(); + + // Now disable and re-enable. + disableSampling(); + + // Check that the data is still there: this allows to peruse samples after profiling. 
+ int status = checkFrames(frames); + if (status != 0) { + throw new RuntimeException("Failed to find the traces before the wipe out."); + } + + // Enabling the sampling should wipe everything out. + enableSampling(); + + status = checkWipeOut(frames); + if (status != 0) { + throw new RuntimeException("Failed to wipe out the information."); + } + } +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java 2017-06-27 14:19:12.848509281 -0700 @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Checks the Recent garbage storage system. 
+ * @build Frame + * @compile HeapMonitorRecentTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorRecentTest + */ + +import java.io.PrintStream; + +public class HeapMonitorRecentTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkLiveOrRecentFrames(Frame[] frames); + native static int checkLiveAndRecentFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void runner(int max) { + int sum = 0; + for (int j = 0; j < max; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 61); + frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 72); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 86); + + enableSampling(); + // We are testing for the recent garbage sampler: + // First run for 10000 iterations to fill up the garbage sampler. + runner(10000); + + // Now because we are in a different stack frame line here, we can just re-use the same runner. + // Run for 3, we really should not see that many of these and most should be the first type. + runner(5000); + + // We should no longer have the initial frames. 
+ int status = checkLiveOrRecentFrames(frames); + if (status != 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + + // Change the last frame only since the rest is identical. + frames[2].lineNumber = 90; + + // We should see those new frames. + status = checkLiveAndRecentFrames(frames); + if (status == 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + } +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatTest.java 2017-06-27 14:19:13.124508306 -0700 @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics + * @build Frame + * @compile HeapMonitorStatTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatTest + */ + +import java.io.PrintStream; + +public class HeapMonitorStatTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int statsNull(); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + + if (statsNull() == 0) { + throw new RuntimeException("Statistics should be null to begin with."); + } + + enableSampling(); + wrapper(); + + if (statsNull() != 0) { + throw new RuntimeException("Statistics should not be null now."); + } + } +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java 2017-06-27 14:19:13.464507106 -0700 @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor API + * @build Frame + * @compile HeapMonitorTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorTest + */ + +import java.io.PrintStream; + +public class HeapMonitorTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. 
+ g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorTest.java", 60); + frames[1] = new Frame("wrapper", "()V", "HeapMonitorTest.java", 71); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorTest.java", 83); + + enableSampling(); + wrapper(); + + int status = checkFrames(frames); + if (status != 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + } +} --- /dev/null 2017-05-01 09:42:45.355096588 -0700 +++ new/test/serviceability/jvmti/HeapMonitor/libHeapMonitor.c 2017-06-27 14:19:13.796505933 -0700 @@ -0,0 +1,642 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
+
+#define PASSED 0
+#define FAILED 2
+
+#define MAX_TRACES 400
+
+static const char *EXC_CNAME = "java/lang/Exception";
+static jvmtiEnv *jvmti = NULL;
+
+static int check_error(jvmtiError err, const char* s) {
+  if (err != JVMTI_ERROR_NONE) {
+    printf("  ## %s error: %d\n", s, err);
+    return 1;
+  }
+  return 0;
+}
+
+static int check_capability_error(jvmtiError err, const char* s) {
+  if (err != JVMTI_ERROR_NONE) {
+    if (err == JVMTI_ERROR_MUST_POSSESS_CAPABILITY) {
+      return 0;
+    }
+    printf("  ## %s error: %d\n", s, err);
+    return 1;
+  }
+  return 1;
+}
+
+static
+jint throw_exc(JNIEnv *env, char *msg) {
+  jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME));
+
+  if (exc_class == NULL) {
+    printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME);
+    return -1;
+  }
+  return JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg);
+}
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+JNIEXPORT
+jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+  return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+  return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+  return JNI_VERSION_1_8;
+}
+
+JNIEXPORT void JNICALL OnVMInit(jvmtiEnv *jvmti, JNIEnv *jni_env, jthread thread) {
+}
+
+JNIEXPORT void JNICALL OnClassLoad(jvmtiEnv *jvmti_env, JNIEnv *jni_env,
+                                   jthread thread, jclass klass) {
+  // NOP.
+} + +JNIEXPORT void JNICALL OnClassPrepare(jvmtiEnv *jvmti_env, JNIEnv *jni_env, + jthread thread, jclass klass) { + // We need to do this to "prime the pump", as it were -- make sure + // that all of the methodIDs have been initialized internally, for + // AsyncGetCallTrace. + jint method_count; + jmethodID *methods = 0; + jvmtiError err = (*jvmti)->GetClassMethods(jvmti, klass, &method_count, &methods); + if ((err != JVMTI_ERROR_NONE) && (err != JVMTI_ERROR_CLASS_NOT_PREPARED)) { + // JVMTI_ERROR_CLASS_NOT_PREPARED is okay because some classes may + // be loaded but not prepared at this point. + throw_exc(jni_env, "Failed to create method IDs for methods in class\n"); + } +} + +static +jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) { + jint res; + + res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti), + JVMTI_VERSION_9); + if (res != JNI_OK || jvmti == NULL) { + printf(" Error: wrong result of a valid call to GetEnv!\n"); + return JNI_ERR; + } + + jvmtiEventCallbacks callbacks; + memset(&callbacks, 0, sizeof(callbacks)); + + callbacks.VMInit = &OnVMInit; + callbacks.ClassLoad = &OnClassLoad; + callbacks.ClassPrepare = &OnClassPrepare; + + jvmtiCapabilities caps; + memset(&caps, 0, sizeof(caps)); + // Get line numbers, sample heap, and filename for the test. 
+ caps.can_get_line_numbers = 1; + caps.can_sample_heap= 1; + caps.can_get_source_file_name = 1; + if (check_error((*jvmti)->AddCapabilities(jvmti, &caps), + "Add capabilities\n")){ + return JNI_ERR; + } + + if (check_error((*jvmti)->SetEventCallbacks(jvmti, &callbacks, + sizeof(jvmtiEventCallbacks)), + " Set Event Callbacks")) { + return JNI_ERR; + } + if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_VM_INIT, NULL), + "Set Event for VM Init")) { + return JNI_ERR; + } + if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_CLASS_LOAD, NULL), + "Set Event for Class Load")) { + return JNI_ERR; + } + if (check_error( (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_CLASS_PREPARE, NULL), + "Set Event for Class Prepare")) { + return JNI_ERR; + } + + return JNI_OK; +} + +// Given a method and a location, this method gets the line number. +// Kind of expensive, comparatively. +static +jint get_line_number(jvmtiEnv *jvmti, jmethodID method, jlocation location) { + // The location is -1 if the bci isn't known or -3 for a native method. + if (location == -1 || location == -3) { + return -1; + } + + // Read the line number table. + jvmtiLineNumberEntry *table_ptr = 0; + jint line_number_table_entries; + int jvmti_error = (*jvmti)->GetLineNumberTable(jvmti, method, + &line_number_table_entries, + &table_ptr); + + if (JVMTI_ERROR_NONE != jvmti_error) { + return -1; + } + if (line_number_table_entries <= 0) { + return -1; + } + if (line_number_table_entries == 1) { + return table_ptr[0].line_number; + } + + // Go through all the line numbers... + jint last_location = table_ptr[0].start_location; + int l; + for (l = 1; l < line_number_table_entries; l++) { + // ... and if you see one that is in the right place for your + // location, you've found the line number! 
+ if ((location < table_ptr[l].start_location) && + (location >= last_location)) { + return table_ptr[l - 1].line_number; + } + last_location = table_ptr[l].start_location; + } + + if (location >= last_location) { + return table_ptr[line_number_table_entries - 1].line_number; + } else { + return -1; + } +} + +typedef struct _ExpectedContentFrame { + const char *name; + const char *signature; + const char *file_name; + int line_number; +} ExpectedContentFrame; + +static jint check_sample_content(JNIEnv *env, + jvmtiStackTrace *trace, + ExpectedContentFrame *expected, + int expected_count) { + int i; + + if (expected_count > trace->frame_count) { + return 0; + } + + for (i = 0; i < expected_count; i++) { + // Get basic information out of the trace. + int bci = trace->frames[i].bci; + jmethodID methodid = trace->frames[i].method_id; + char *name = NULL, *signature = NULL, *file_name = NULL; + + if (bci < 0) { + return 0; + } + + // Transform into usable information. + int line_number = get_line_number(jvmti, methodid, bci); + (*jvmti)->GetMethodName(jvmti, methodid, &name, &signature, 0); + + jclass declaring_class; + if (JVMTI_ERROR_NONE != + (*jvmti)->GetMethodDeclaringClass(jvmti, methodid, &declaring_class)) { + return 0; + } + + jvmtiError err = (*jvmti)->GetSourceFileName(jvmti, declaring_class, + &file_name); + if (err != JVMTI_ERROR_NONE) { + return 0; + } + + // Compare now, none should be NULL. 
+ if (name == NULL) { + return 0; + } + + if (file_name == NULL) { + return 0; + } + + if (signature == NULL) { + return 0; + } + + if (strcmp(name, expected[i].name) || + strcmp(signature, expected[i].signature) || + strcmp(file_name, expected[i].file_name) || + line_number != expected[i].line_number) { + return 0; + } + } + + return 1; +} + +static jint compare_samples(JNIEnv* env, jvmtiStackTrace* traces, int trace_count, + ExpectedContentFrame* expected_content, size_t size) { + // We expect the code to record correctly the bci, retrieve the line + // number, have the right method and the class name of the first frames. + int i; + for (i = 0; i < trace_count; i++) { + jvmtiStackTrace *trace = traces + i; + if (check_sample_content(env, trace, expected_content, size)) { + // At least one frame matched what we were looking for. + return 1; + } + } + + return 0; +} + +static jint check_samples(JNIEnv* env, ExpectedContentFrame* expected, + size_t size, + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*)) { + jvmtiStackTraces traces; + jvmtiError error = get_traces(jvmti, &traces); + + if (error != JVMTI_ERROR_NONE) { + return 0; + } + + int result = compare_samples(env, traces.stack_traces, traces.trace_count, + expected, size); + (*jvmti)->ReleaseTraces(jvmti, &traces); + return result; +} + +static jint frames_exist_live(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetLiveTraces); +} + +static jint frames_exist_recent(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetGarbageTraces); +} + +static jint frames_exist_frequent(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetFrequentGarbageTraces); +} + +// Static native API for various tests. 
+static void fill_native_frames(JNIEnv* env, jobjectArray frames, + ExpectedContentFrame* native_frames, size_t size) { + size_t i; + for(i = 0; i < size; i++) { + jobject obj = (*env)->GetObjectArrayElement(env, frames, i); + jclass frame_class = (*env)->GetObjectClass(env, obj); + jfieldID line_number_field_id = (*env)->GetFieldID(env, frame_class, "lineNumber", "I"); + int line_number = (*env)->GetIntField(env, obj, line_number_field_id); + + jfieldID string_id = (*env)->GetFieldID(env, frame_class, "method", "Ljava/lang/String;"); + jstring string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* method = (*env)->GetStringUTFChars(env, string_object, 0); + + string_id = (*env)->GetFieldID(env, frame_class, "fileName", "Ljava/lang/String;"); + string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* file_name = (*env)->GetStringUTFChars(env, string_object, 0); + + string_id = (*env)->GetFieldID(env, frame_class, "signature", "Ljava/lang/String;"); + string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* signature= (*env)->GetStringUTFChars(env, string_object, 0); + + native_frames[i].name = method; + native_frames[i].file_name = file_name; + native_frames[i].signature = signature; + native_frames[i].line_number = line_number; + } +} + +static jint checkAnd(JNIEnv *env, jobjectArray frames, int live, int recent, + int frequent) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + int result = 1; + + if (live) { + result = frames_exist_live(env, native_frames, size); + } + + if (recent) { + result = result && + frames_exist_recent(env, native_frames, size); + } + + if (frequent) { + result = result && + frames_exist_frequent(env, native_frames, size); + } + + return result; +} + +static jint checkOr(JNIEnv *env, jobjectArray frames, int live, int recent, + int frequent) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. + jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + int result = 0; + + if (live) { + result = frames_exist_live(env, native_frames, size); + } + + if (recent) { + result = result || + frames_exist_recent(env, native_frames, size); + } + + if (frequent) { + result = result || + frames_exist_frequent(env, native_frames, size); + } + + return result; +} + +static jint checkAll(JNIEnv *env, jobjectArray frames) { + return checkAnd(env, frames, 1, 1, 1); +} + +static jint checkNone(JNIEnv *env, jobjectArray frames) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + if ((!frames_exist_live(env, native_frames, size)) && + (!frames_exist_recent(env, native_frames, size)) && + (!frames_exist_frequent(env, native_frames, size))) { + return 1; + } + return 0; +} + +static void enable_sampling() { + check_error((*jvmti)->StartHeapSampling(jvmti, 1<<19, MAX_TRACES), + "Start Heap Sampling"); +} + +static void disable_sampling() { + check_error((*jvmti)->StopHeapSampling(jvmti), "Stop Heap Sampling"); +} + +// HeapMonitorTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +// HeapMonitorOnOffTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorOnOffTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorOnOffTest_checkWipeOut(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in none of the parts. + if (!checkNone(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorOnOffTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorOnOffTest_disableSampling(JNIEnv *env, jclass cls) { + disable_sampling(); +} + +// HeapMonitorRecentTest JNI. 
+JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkLiveOrRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkOr(env, frames, 1, 1, 0)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkLiveAndRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkAnd(env, frames, 1, 1, 0)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorRecentTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +// HeapMonitorFrequentTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorFrequentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorFrequentTest_checkFrequentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkAnd(env, frames, 0, 0, 1)) { + return PASSED; + } + return FAILED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorFrequentTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorNoCapabilityTest_allSamplingMethodsFail(JNIEnv *env, jclass cls) { + jvmtiCapabilities caps; + memset(&caps, 0, sizeof(caps)); + caps.can_sample_heap= 1; + if (check_error((*jvmti)->RelinquishCapabilities(jvmti, &caps), + "Add capabilities\n")){ + return 0; + } + + if (check_capability_error((*jvmti)->StartHeapSampling(jvmti, 1<<19, + MAX_TRACES), + "Start Heap Sampling")) { + return 0; + } + + if (check_capability_error((*jvmti)->StopHeapSampling(jvmti), + "Stop Heap Sampling")) { + return 0; + } + + if 
(check_capability_error((*jvmti)->ReleaseTraces(jvmti, NULL), + "Release Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetHeapSamplingStats(jvmti, NULL), + "Get Heap Sampling Stats")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL), + "Get Garbage Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL), + "Get Frequent Garbage Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetLiveTraces(jvmti, NULL), + "Get Live Traces")) { + return 0; + } + + // Calling enable sampling should fail now. + return 1; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorStatTest_statsNull(JNIEnv *env, jclass cls) { + jvmtiHeapSamplingStats stats; + check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), + "Heap Sampling Statistics"); + + jvmtiHeapSamplingStats zero; + memset(&zero, 0, sizeof(zero)); + return memcmp(&stats, &zero, sizeof(zero)) == 0; +} + +#ifdef __cplusplus +} +#endif