# HG changeset patch # User Jean Christophe Beyler # Date 1509400864 25200 # Mon Oct 30 15:01:04 2017 -0700 # Node ID de50da53f850e083ccce842ed4c719b56470c4d4 # Parent f43576cfb273dbd23544e0037aaac7b3ce8406ad [mq]: heap8 diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk --- a/make/test/JtregNativeHotspot.gmk +++ b/make/test/JtregNativeHotspot.gmk @@ -70,6 +70,7 @@ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorInfo \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetOwnedMonitorStackDepthInfo \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/GetNamedModule \ + $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/IsModifiableModule \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/AddModuleReads \ $(TOPDIR)/test/hotspot/jtreg/serviceability/jvmti/AddModuleExportsAndOpens \ diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -38,6 +38,7 @@ #include "memory/resourceArea.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/init.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" @@ -289,7 +290,46 @@ } #endif +HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) { + // We can come here for three reasons: + // - We either really did fill the tlab. + // - We pretended to everyone we did and we want to sample. + // - Both of the above reasons are true at the same time. + if (HeapMonitoring::enabled()) { + if (thread->tlab().should_sample()) { + HeapWord *end = thread->tlab().end(); + thread->tlab().set_back_actual_end(); + + // If we don't have an object yet, try to allocate it. + if (obj == NULL) { + // The tlab could still have space after this sample. 
+ obj = thread->tlab().allocate(size); + } + + // Is the object allocated now? + // If not, this means we have to wait till a new TLAB, let the subsequent + // call to handle_heap_sampling pick the next sample. + if (obj != NULL) { + // Object is allocated, sample it now. + HeapMonitoring::object_alloc_do_sample(thread, + reinterpret_cast(obj), + size * HeapWordSize); + // Pick a next sample in this case, we allocated right. + thread->tlab().pick_next_sample(thread->tlab().top() - end); + } + } + } + + return obj; +} + HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) { + HeapWord* obj = handle_heap_sampling(thread, NULL, size); + bool should_sample = thread->tlab().should_sample(); + + if (obj != NULL) { + return obj; + } // Retain tlab and allocate object in shared space if // the amount free in the tlab is too large to discard. @@ -309,7 +349,7 @@ } // Allocate a new TLAB... - HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size); + obj = Universe::heap()->allocate_new_tlab(new_tlab_size); if (obj == NULL) { return NULL; } @@ -330,7 +370,12 @@ #endif // ASSERT } thread->tlab().fill(obj, obj + size, new_tlab_size); - return obj; + + if (should_sample) { + return handle_heap_sampling(thread, obj, size); + } else { + return obj; + } } void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) { diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -149,6 +149,9 @@ inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size); static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size); + // Handle if needed heap sampling. 
+ static HeapWord* handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size); + // Allocate an uninitialized block of the given size, or returns NULL if // this is impossible. inline static HeapWord* common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS); diff --git a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp @@ -158,6 +158,7 @@ AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD); + THREAD->tlab().handle_sample(THREAD, result, size); return result; } diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -29,6 +29,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.inline.hpp" #include "oops/oop.inline.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/thread.inline.hpp" #include "runtime/threadSMR.hpp" #include "utilities/copy.hpp" @@ -122,10 +123,13 @@ set_top(NULL); set_pf_top(NULL); set_end(NULL); + set_actual_end(NULL); + set_slow_path_end(NULL); } } assert(!(retire || ZeroTLAB) || - (start() == NULL && end() == NULL && top() == NULL), + (start() == NULL && end() == NULL && top() == NULL && + actual_end() == NULL && slow_path_end() == NULL), "TLAB must be reset"); } @@ -171,8 +175,21 @@ _number_of_refills++; print_stats("fill"); assert(top <= start + new_size - alignment_reserve(), "size too small"); + + // Remember old bytes until sample for the next tlab only if this is our first + // actual refill. 
+ size_t old_bytes_until_sample = 0; + if (_number_of_refills > 1) { + old_bytes_until_sample = bytes_until_sample(); + } + initialize(start, top, start + new_size - alignment_reserve()); + if (old_bytes_until_sample > 0) { + set_bytes_until_sample(old_bytes_until_sample); + set_sample_end(); + } + // Reset amount of internal fragmentation set_refill_waste_limit(initial_refill_waste_limit()); } @@ -184,7 +201,10 @@ set_top(top); set_pf_top(top); set_end(end); + set_actual_end(end); + set_slow_path_end(end); invariants(); + _bytes_until_sample = 0; } void ThreadLocalAllocBuffer::initialize() { @@ -306,13 +326,99 @@ guarantee(p == top(), "end of last object must match end of space"); } +void ThreadLocalAllocBuffer::set_sample_end() { + size_t heap_words_remaining = _end - _top; + size_t bytes_left = bytes_until_sample(); + size_t words_until_sample = bytes_left / HeapWordSize; + + if (heap_words_remaining > words_until_sample) { + HeapWord* new_end = _top + words_until_sample; + set_end(new_end); + set_slow_path_end(new_end); + set_bytes_until_sample(0); + } else { + bytes_left -= heap_words_remaining * HeapWordSize; + set_bytes_until_sample(bytes_left); + } +} + +void ThreadLocalAllocBuffer::pick_next_sample(size_t diff) { + if (!HeapMonitoring::enabled()) { + return; + } + + if (bytes_until_sample() == 0) { + HeapMonitoring::pick_next_sample(bytes_until_sample_addr()); + } + + if (diff > 0) { + // Try to correct sample size by removing extra space from last allocation. 
+ if (bytes_until_sample() > diff * HeapWordSize) { + set_bytes_until_sample(bytes_until_sample() - diff * HeapWordSize); + } + } + + set_sample_end(); + + log_trace(gc, tlab)("TLAB picked next sample: thread: " INTPTR_FORMAT " [id: %2d]" + " start: %p top: %p end: %p actual_end: %p slow_path_end: %p", + p2i(myThread()), myThread()->osthread()->thread_id(), + start(), top(), end(), + actual_end(), slow_path_end()); +} + Thread* ThreadLocalAllocBuffer::myThread() { return (Thread*)(((char *)this) + in_bytes(start_offset()) - in_bytes(Thread::tlab_start_offset())); } +void ThreadLocalAllocBuffer::set_back_actual_end() { + // Did a fast TLAB refill occur? + if (_slow_path_end != _end) { + // Fix up the actual end to be now the end of this TLAB. + _slow_path_end = _end; + _actual_end = _end; + } else { + _end = _actual_end; + } +} +void ThreadLocalAllocBuffer::handle_sample(Thread* thread, HeapWord* result, + size_t size) { + if (!HeapMonitoring::enabled()) { + return; + } + + size_t size_in_bytes = size * HeapWordSize; + if (bytes_until_sample() > size_in_bytes) { + set_bytes_until_sample(bytes_until_sample() - size_in_bytes); + } else { + // Technically this is not exactly right, we probably should remember how many bytes are + // negative probably to then reduce our next sample size. + set_bytes_until_sample(0); + } + + // Should we sample now? + if (should_sample()) { + HeapMonitoring::object_alloc_do_sample(thread, + reinterpret_cast(result), + size_in_bytes); + set_back_actual_end(); + pick_next_sample(); + } +} + +HeapWord* ThreadLocalAllocBuffer::hard_end() { + // Did a fast TLAB refill occur? + if (_slow_path_end != _end) { + // Fix up the actual end to be now the end of this TLAB. 
+ _slow_path_end = _end; + _actual_end = _end; + } + + return _actual_end + alignment_reserve(); +} GlobalTLABStats::GlobalTLABStats() : _allocating_threads_avg(TLABAllocationWeight) { diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp @@ -37,6 +37,13 @@ // It is thread-private at any time, but maybe multiplexed over // time across multiple threads. The park()/unpark() pair is // used to make it available for such multiplexing. +// +// Heap sampling is performed via the end/actual_end fields. +// actual_end contains the real end of the tlab allocation, +// whereas end can be set to an arbitrary spot in the tlab to +// trip the return and sample the allocation. +// slow_path_end is used to track if a fast tlab refill occured +// between slowpath calls. class ThreadLocalAllocBuffer: public CHeapObj { friend class VMStructs; friend class JVMCIVMStructs; @@ -44,10 +51,15 @@ HeapWord* _start; // address of TLAB HeapWord* _top; // address after last allocation HeapWord* _pf_top; // allocation prefetch watermark - HeapWord* _end; // allocation end (excluding alignment_reserve) + HeapWord* _end; // allocation end (can be the sampling end point or + // the actual TLAB end, excluding alignment_reserve) + HeapWord* _actual_end; // allocation actual_end (actual TLAB end, excluding alignment_reserve) + HeapWord* _slow_path_end; // remember the end in case a fast refill occurs. + size_t _desired_size; // desired size (including alignment_reserve) size_t _refill_waste_limit; // hold onto tlab if free() is larger than this size_t _allocated_before_last_gc; // total bytes allocated up until the last gc + size_t _bytes_until_sample; // bytes until sample. 
static size_t _max_size; // maximum size of any TLAB static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB @@ -66,17 +78,20 @@ void set_start(HeapWord* start) { _start = start; } void set_end(HeapWord* end) { _end = end; } + void set_actual_end(HeapWord* actual_end) { _actual_end = actual_end; } + void set_slow_path_end(HeapWord* slow_path_end) { _slow_path_end = slow_path_end; } void set_top(HeapWord* top) { _top = top; } void set_pf_top(HeapWord* pf_top) { _pf_top = pf_top; } void set_desired_size(size_t desired_size) { _desired_size = desired_size; } void set_refill_waste_limit(size_t waste) { _refill_waste_limit = waste; } + void set_bytes_until_sample(size_t bytes) { _bytes_until_sample = bytes; } size_t initial_refill_waste_limit() { return desired_size() / TLABRefillWasteFraction; } static int target_refills() { return _target_refills; } size_t initial_desired_size(); - size_t remaining() const { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } + size_t remaining() { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } // Make parsable and release it. 
void reset(); @@ -115,7 +130,9 @@ HeapWord* start() const { return _start; } HeapWord* end() const { return _end; } - HeapWord* hard_end() const { return _end + alignment_reserve(); } + HeapWord* slow_path_end() const { return _slow_path_end; } + HeapWord* actual_end() const { return _actual_end; } + HeapWord* hard_end(); HeapWord* top() const { return _top; } HeapWord* pf_top() const { return _pf_top; } size_t desired_size() const { return _desired_size; } @@ -162,11 +179,20 @@ void fill(HeapWord* start, HeapWord* top, size_t new_size); void initialize(); + void pick_next_sample(size_t diff = 0); + void set_sample_end(); + void set_back_actual_end(); + void handle_sample(Thread* thread, HeapWord* result, size_t size); + size_t bytes_until_sample() { return _bytes_until_sample; } + size_t *bytes_until_sample_addr() { return &_bytes_until_sample; } + bool should_sample() { return bytes_until_sample() == 0; } + static size_t refill_waste_limit_increment() { return TLABWasteIncrement; } // Code generation support static ByteSize start_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _start); } static ByteSize end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _end ); } + static ByteSize actual_end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _actual_end ); } static ByteSize top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _top ); } static ByteSize pf_top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top ); } static ByteSize size_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); } diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ b/src/hotspot/share/prims/jvmti.xml @@ -10353,6 +10353,13 @@ See . + + + + Can sample the heap. + If this capability is enabled then the heap sampling methods can be called. + + @@ -11531,6 +11538,272 @@ + + + + + jvmtiFrameInfo + + Pointer to the call frames. 
+ + + + The number of frames for the trace. + + + + The size of the object allocation. + + + + The thread id number. + + + + + + + jvmtiStackTrace + + + The array with the various stack traces. + + + + + + + Number of traces pointed by the array . + + + + + + + + + The number of sampled allocations during the lifetime of the sampler. + For very long sampling, this number can overflow. + + + + + + + The number of samples already garbage collected. + For very long sampling, this number can overflow. + + + + + + + Accumulation of the sample rates chosen. + For very long sampling, this number can overflow. + + + + + + + The number of sample rates chosen. + For very long sampling, this number can overflow. + + + + + + + Accumulation of stack depths collected by the sampler. + For very long sampling, this number can overflow. + + + + + + Start Heap Sampling + + Start the heap sampler in the JVM. The function provides, via its argument, the sampling + rate requested and will fill internal data structures with heap allocation samples. The + samples are obtained via the , + , , + functions. + + Starting the heap sampler resets internal traces and counters. Therefore stopping the sampler + puts internal trace samples and counters on pause for post-processing. + + new + + + + + + + + The monitoring rate used for sampling. The sampler will use a statistical approach to + provide in average sampling every allocated bytes. + + Note: a low monitoring rate will incur a higher overhead, therefore, the sampler should + only be used when knowing it may impact performance. + + + + + + The maximum storage used for the sampler. By default, the value is 200. + + + + + + is less than zero. + + + + + + Stop Heap Sampling + + Stop the heap sampler in the JVM. + Any sample obtained during sampling is still available via the , + , , + functions. + + Starting the heap sampler resets internal traces and counters. 
Therefore stopping the sampler + puts internal trace samples and counters on pause for post-processing. + + new + + + + + + + + + + + Get Live Traces + + Get Live Heap Sampled traces. The fields of the + structure are filled in with details of the specified sampled allocation. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns no traces. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Get Garbage Traces + + Get the recent garbage heap sampled traces. The fields of the + structure are filled in with details of the specified sampled allocation. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns no traces. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Get Frequent Garbage Traces + + Get the frequent garbage heap sampled traces. The fields of the + structure are filled in with details of the specified sampled allocation. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns no traces. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. + + + + + + + + + Release traces provided by the heap monitoring + + Release traces provided by any of the trace retrieval methods. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be released. + + + + + + + + + Get the heap sampling statistics + + Returns a to understand the heap sampling behavior and current + internal data storage status. + + This method can be called at any time but if the sampler has not been started via at least + one call to it returns a zeroed-out structure. + + new + + + + + + jvmtiHeapSamplingStats + + The structure to be filled with the heap sampler's statistics. 
+ + + + + + + + diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -46,6 +46,7 @@ #include "prims/jvmtiCodeBlobEvents.hpp" #include "prims/jvmtiExtensions.hpp" #include "prims/jvmtiGetLoadedClasses.hpp" +#include "prims/jvmtiHeapTransition.hpp" #include "prims/jvmtiImpl.hpp" #include "prims/jvmtiManageCapabilities.hpp" #include "prims/jvmtiRawMonitor.hpp" @@ -55,6 +56,7 @@ #include "prims/jvmtiUtil.hpp" #include "runtime/arguments.hpp" #include "runtime/deoptimization.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/javaCalls.hpp" #include "runtime/jfieldIDWorkaround.hpp" @@ -2004,6 +2006,81 @@ return JVMTI_ERROR_NONE; } /* end IterateOverInstancesOfClass */ +// Start the sampler. +jvmtiError +JvmtiEnv::StartHeapSampling(jint monitoring_rate, jint max_storage) { + if (monitoring_rate < 0) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapThreadTransition htt(Thread::current()); + HeapMonitoring::initialize_profiling(monitoring_rate, max_storage); + return JVMTI_ERROR_NONE; +} /* end StartHeapSampling */ + +// Stop the sampler. +jvmtiError +JvmtiEnv::StopHeapSampling() { + HeapThreadTransition htt(Thread::current()); + HeapMonitoring::stop_profiling(); + return JVMTI_ERROR_NONE; +} /* end StopHeapSampling */ + +// Get the currently live sampled allocations. +jvmtiError +JvmtiEnv::GetLiveTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_live_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetLiveTraces */ + +// Get the currently live sampled allocations. 
+jvmtiError +JvmtiEnv::GetGarbageTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_garbage_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetGarbageTraces */ + +// Get the currently live sampled allocations. +jvmtiError +JvmtiEnv::GetFrequentGarbageTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_frequent_garbage_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetFrequentGarbageTraces */ + +// Release sampled traces. +jvmtiError +JvmtiEnv::ReleaseTraces(jvmtiStackTraces* stack_traces) { + if (stack_traces == NULL) { + return JVMTI_ERROR_NONE; + } + HeapMonitoring::release_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end ReleaseTraces */ + +// Get the heap sampling statistics. +jvmtiError +JvmtiEnv::GetHeapSamplingStats(jvmtiHeapSamplingStats* stats) { + if (stats == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + HeapMonitoring::get_sampling_statistics(stats); + return JVMTI_ERROR_NONE; +} /* end GetHeapSamplingStats */ // // Local Variable functions diff --git a/src/hotspot/share/prims/jvmtiHeapTransition.hpp b/src/hotspot/share/prims/jvmtiHeapTransition.hpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/prims/jvmtiHeapTransition.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP +#define SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP + +#include "runtime/interfaceSupport.hpp" + +// A RAII class that handles transitions from the agent into the VM. +class HeapThreadTransition : StackObj { + private: + JavaThreadState _saved_state; + JavaThread *_jthread; + + public: + // Transitions this thread from the agent (thread_in_native) to the VM. + HeapThreadTransition(Thread *thread) { + if (thread->is_Java_thread()) { + _jthread = static_cast(thread); + _saved_state = _jthread->thread_state(); + if (_saved_state == _thread_in_native) { + ThreadStateTransition::transition_from_native(_jthread, _thread_in_vm); + } else { + ThreadStateTransition::transition(_jthread, + _saved_state, + _thread_in_vm); + } + } else { + _jthread = NULL; + _saved_state = _thread_new; + } + } + + // Transitions this thread back to the agent from the VM. 
+ ~HeapThreadTransition() { + if (_jthread != NULL) { + ThreadStateTransition::transition(_jthread, _thread_in_vm, _saved_state); + } + } +}; + +#endif // SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP diff --git a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp --- a/src/hotspot/share/prims/jvmtiManageCapabilities.cpp +++ b/src/hotspot/share/prims/jvmtiManageCapabilities.cpp @@ -157,6 +157,7 @@ jc.can_generate_field_modification_events = 1; jc.can_generate_field_access_events = 1; jc.can_generate_breakpoint_events = 1; + jc.can_sample_heap = 1; return jc; } @@ -423,6 +424,8 @@ log_trace(jvmti)("can_generate_frame_pop_events"); if (cap->can_generate_breakpoint_events) log_trace(jvmti)("can_generate_breakpoint_events"); + if (cap->can_sample_heap) + log_trace(jvmti)("can_sample_heap"); if (cap->can_suspend) log_trace(jvmti)("can_suspend"); if (cap->can_redefine_any_class ) diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp new file mode 100644 --- /dev/null +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -0,0 +1,721 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "gc/shared/collectedHeap.hpp" +#include "memory/universe.hpp" +#include "runtime/heapMonitoring.hpp" +#include "runtime/vframe.hpp" + +// DONE: +// merged printouts +// broke up the one-liner +// talk about synchro +// cleaned up old entry points for C1/interpreter +// add statistics per GC and log start up initialization. +// removed the null pointer check during the weak_oops_do walk +// cleaned up the task_executor +// fixed the compilation using the option --disable-precompiled-header + +static const int MaxStackDepth = 64; + +// Internal data structure representing traces. +struct StackTraceData : CHeapObj { + jvmtiStackTrace *trace; + oop obj; + int references; + + StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {} + + StackTraceData() : trace(NULL), obj(NULL), references(0) {} + + // StackTraceDatas are shared around the board between various lists. So + // handle this by hand instead of having this in the destructor. There are + // cases where the struct is on the stack but holding heap data not to be + // freed. + static void free_data(StackTraceData *data) { + if (data->trace != NULL) { + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames); + FREE_C_HEAP_OBJ(data->trace); + } + delete data; + } +}; + +// Fixed size buffer for holding garbage traces. 
+class GarbageTracesBuffer : public CHeapObj { + public: + GarbageTracesBuffer(uint32_t size) : _size(size) { + _garbage_traces = NEW_C_HEAP_ARRAY(StackTraceData*, + size, + mtInternal); + memset(_garbage_traces, 0, sizeof(StackTraceData*) * size); + } + + virtual ~GarbageTracesBuffer() { + FREE_C_HEAP_ARRAY(StackTraceData*, _garbage_traces); + } + + StackTraceData** get_traces() const { + return _garbage_traces; + } + + bool store_trace(StackTraceData *trace) { + uint32_t index; + if (!select_replacement(&index)) { + return false; + } + + StackTraceData *old_data = _garbage_traces[index]; + + if (old_data != NULL) { + old_data->references--; + + if (old_data->references == 0) { + StackTraceData::free_data(old_data); + } + } + + trace->references++; + _garbage_traces[index] = trace; + return true; + } + + uint32_t size() const { + return _size; + } + + protected: + // Subclasses select the trace to replace. Returns false if no replacement + // is to happen, otherwise stores the index of the trace to replace in + // *index. + virtual bool select_replacement(uint32_t *index) = 0; + + const uint32_t _size; + + private: + // The current garbage traces. A fixed-size ring buffer. + StackTraceData **_garbage_traces; +}; + +// Keep statistical sample of traces over the lifetime of the server. +// When the buffer is full, replace a random entry with probability +// 1/samples_seen. This strategy tends towards preserving the most frequently +// occuring traces over time. 
+class FrequentGarbageTraces : public GarbageTracesBuffer { + public: + FrequentGarbageTraces(int size) + : GarbageTracesBuffer(size), + _garbage_traces_pos(0), + _samples_seen(0) { + } + + virtual ~FrequentGarbageTraces() { + } + + virtual bool select_replacement(uint32_t* index) { + ++_samples_seen; + + if (_garbage_traces_pos < _size) { + *index = _garbage_traces_pos++; + return true; + } + + uint64_t random_uint64 = + (static_cast(::random()) << 32) | ::random(); + + uint32_t random_index = random_uint64 % _samples_seen; + if (random_index < _size) { + *index = random_index; + return true; + } + + return false; + } + + private: + // The current position in the buffer as we initially fill it. + uint32_t _garbage_traces_pos; + + uint64_t _samples_seen; +}; + +// Store most recent garbage traces. +class MostRecentGarbageTraces : public GarbageTracesBuffer { + public: + MostRecentGarbageTraces(int size) + : GarbageTracesBuffer(size), + _garbage_traces_pos(0) { + } + + virtual ~MostRecentGarbageTraces() { + } + + virtual bool select_replacement(uint32_t* index) { + *index = _garbage_traces_pos; + + _garbage_traces_pos = + (_garbage_traces_pos + 1) % _size; + + return true; + } + + private: + // The current position in the buffer. + uint32_t _garbage_traces_pos; +}; + +// Each object that we profile is stored as trace with the thread_id. +class StackTraceStorage : public CHeapObj { + public: + // The function that gets called to add a trace to the list of + // traces we are maintaining. + void add_trace(jvmtiStackTrace *trace, oop o); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_all_stack_traces(jvmtiStackTraces *traces); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. 
+ void get_garbage_stack_traces(jvmtiStackTraces *traces); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces); + + // Executes whenever weak references are traversed. is_alive tells + // you if the given oop is still reachable and live. + size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); + + ~StackTraceStorage(); + StackTraceStorage(); + + static StackTraceStorage* storage() { + if (internal_storage == NULL) { + internal_storage = new StackTraceStorage(); + } + return internal_storage; + } + + static void reset_stack_trace_storage() { + delete internal_storage; + internal_storage = NULL; + } + + bool is_initialized() { + return _initialized; + } + + const jvmtiHeapSamplingStats& get_heap_sampling_stats() const { + return _stats; + } + + // Static method to set the storage in place at initialization. + static void initialize_stack_trace_storage(int max_storage) { + reset_stack_trace_storage(); + StackTraceStorage *storage = StackTraceStorage::storage(); + storage->initialize_storage(max_storage); + } + + void accumulate_sample_rate(size_t rate) { + _stats.sample_rate_accumulation += rate; + _stats.sample_rate_count++; + } + + bool initialized() { return _initialized; } + volatile bool *initialized_address() { return &_initialized; } + + private: + // The traces currently sampled. + GrowableArray *_allocated_traces; + + // Recent garbage traces. + MostRecentGarbageTraces *_recent_garbage_traces; + + // Frequent garbage traces. + FrequentGarbageTraces *_frequent_garbage_traces; + + // Heap Sampling statistics. + jvmtiHeapSamplingStats _stats; + + // Maximum amount of storage provided by the JVMTI call initialize_profiling. 
+ int _max_storage; + + static StackTraceStorage* internal_storage; + volatile bool _initialized; + + // Support functions and classes for copying data to the external + // world. + class StackTraceDataCopier { + public: + virtual int size() const = 0; + virtual const StackTraceData *get(uint32_t i) const = 0; + }; + + class LiveStackTraceDataCopier : public StackTraceDataCopier { + public: + LiveStackTraceDataCopier(GrowableArray *data) : + _data(data) {} + int size() const { return _data ? _data->length() : 0; } + const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); } + + private: + GrowableArray *_data; + }; + + class GarbageStackTraceDataCopier : public StackTraceDataCopier { + public: + GarbageStackTraceDataCopier(StackTraceData **data, int size) : + _data(data), _size(size) {} + int size() const { return _size; } + const StackTraceData *get(uint32_t i) const { return _data[i]; } + + private: + StackTraceData **_data; + int _size; + }; + + // Instance initialization. + void initialize_storage(int max_storage); + + // Copies from StackTraceData to jvmtiStackTrace. + bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from); + + // Creates a deep copy of the list of StackTraceData. 
+ void copy_stack_traces(const StackTraceDataCopier &copier, + jvmtiStackTraces *traces); + + void store_garbage_trace(const StackTraceData &trace); + + void free_garbage(); +}; + +StackTraceStorage* StackTraceStorage::internal_storage; + +// Statics for Sampler +double HeapMonitoring::_log_table[1 << FastLogNumBits]; +bool HeapMonitoring::_enabled; +AlwaysTrueClosure HeapMonitoring::_always_true; +jint HeapMonitoring::_monitoring_rate; + +// Cheap random number generator +uint64_t HeapMonitoring::_rnd; + +StackTraceStorage::StackTraceStorage() : + _allocated_traces(NULL), + _recent_garbage_traces(NULL), + _frequent_garbage_traces(NULL), + _max_storage(0), + _initialized(false) { + memset(&_stats, 0, sizeof(_stats)); +} + +void StackTraceStorage::free_garbage() { + StackTraceData **recent_garbage = NULL; + uint32_t recent_size = 0; + + StackTraceData **frequent_garbage = NULL; + uint32_t frequent_size = 0; + + if (_recent_garbage_traces != NULL) { + recent_garbage = _recent_garbage_traces->get_traces(); + recent_size = _recent_garbage_traces->size(); + } + + if (_frequent_garbage_traces != NULL) { + frequent_garbage = _frequent_garbage_traces->get_traces(); + frequent_size = _frequent_garbage_traces->size(); + } + + // Simple solution since this happens at exit. + // Go through the recent and remove any that only are referenced there. + for (uint32_t i = 0; i < recent_size; i++) { + StackTraceData *trace = recent_garbage[i]; + if (trace != NULL) { + trace->references--; + + if (trace->references == 0) { + StackTraceData::free_data(trace); + } + } + } + + // Then go through the frequent and remove those that are now only there. 
+ for (uint32_t i = 0; i < frequent_size; i++) {
+ StackTraceData *trace = frequent_garbage[i];
+ if (trace != NULL) {
+ trace->references--;
+
+ if (trace->references == 0) {
+ StackTraceData::free_data(trace);
+ }
+ }
+ }
+}
+
+StackTraceStorage::~StackTraceStorage() {
+ delete _allocated_traces;
+
+ free_garbage();
+ delete _recent_garbage_traces;
+ delete _frequent_garbage_traces;
+ _initialized = false;
+}
+
+void StackTraceStorage::initialize_storage(int max_storage) {
+ // In case multiple threads got locked and then 1 by 1 got through.
+ if (_initialized) {
+ return;
+ }
+
+ _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
+ GrowableArray<StackTraceData>(128, true);
+
+ _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
+ _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);
+
+ _max_storage = max_storage;
+ _initialized = true;
+}
+
+void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
+ StackTraceData new_data(trace, o);
+ _stats.sample_count++;
+ _stats.stack_depth_accumulation += trace->frame_count;
+ _allocated_traces->append(new_data);
+}
+
+size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
+ OopClosure *f) {
+ size_t count = 0;
+ if (is_initialized()) {
+ int len = _allocated_traces->length();
+
+ // Compact the oop traces. Moves the live oops to the beginning of the
+ // growable array, potentially overwriting the dead ones.
+ int curr_pos = 0;
+ for (int i = 0; i < len; i++) {
+ StackTraceData &trace = _allocated_traces->at(i);
+ oop value = trace.obj;
+ if (Universe::heap()->is_in_reserved(value)
+ && is_alive->do_object_b(value)) {
+ // Update the oop to point to the new object if it is still alive.
+ f->do_oop(&(trace.obj));
+
+ // Copy the old trace, if it is still live.
+ _allocated_traces->at_put(curr_pos++, trace);
+
+ count++;
+ } else {
+ // If the old trace is no longer live, add it to the list of
+ // recently collected garbage. 
+ store_garbage_trace(trace); + } + } + + // Zero out remaining array elements. Even though the call to trunc_to + // below truncates these values, zeroing them out is good practice. + StackTraceData zero_trace; + for (int i = curr_pos; i < len; i++) { + _allocated_traces->at_put(i, zero_trace); + } + + // Set the array's length to the number of live elements. + _allocated_traces->trunc_to(curr_pos); + } + + return count; +} + +bool StackTraceStorage::deep_copy(jvmtiStackTrace *to, + const StackTraceData *from) { + const jvmtiStackTrace *src = from->trace; + *to = *src; + + to->frames = + NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); + + if (to->frames == NULL) { + return false; + } + + memcpy(to->frames, + src->frames, + sizeof(jvmtiFrameInfo) * MaxStackDepth); + return true; +} + +// Called by the outside world; returns a copy of the stack traces +// (because we could be replacing them as the user handles them). +// The array is secretly null-terminated (to make it easier to reclaim). +void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) { + LiveStackTraceDataCopier copier(_allocated_traces); + copy_stack_traces(copier, traces); +} + +// See comment on get_all_stack_traces +void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) { + GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(), + _recent_garbage_traces->size()); + copy_stack_traces(copier, traces); +} + +// See comment on get_all_stack_traces +void StackTraceStorage::get_frequent_garbage_stack_traces( + jvmtiStackTraces *traces) { + GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(), + _frequent_garbage_traces->size()); + copy_stack_traces(copier, traces); +} + + +void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, + jvmtiStackTraces *traces) { + int len = copier.size(); + + // Create a new array to store the StackTraceData objects. + // + 1 for a NULL at the end. 
+ jvmtiStackTrace *t = + NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal); + if (t == NULL) { + traces->stack_traces = NULL; + traces->trace_count = 0; + return; + } + // +1 to have a NULL at the end of the array. + memset(t, 0, (len + 1) * sizeof(*t)); + + // Copy the StackTraceData objects into the new array. + int trace_count = 0; + for (int i = 0; i < len; i++) { + const StackTraceData *stack_trace = copier.get(i); + if (stack_trace != NULL) { + jvmtiStackTrace *to = &t[trace_count]; + if (!deep_copy(to, stack_trace)) { + continue; + } + trace_count++; + } + } + + traces->stack_traces = t; + traces->trace_count = trace_count; +} + +void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) { + StackTraceData *new_trace = new StackTraceData(); + *new_trace = trace; + + bool accepted = _recent_garbage_traces->store_trace(new_trace); + + // Accepted is on the right of the boolean to force the store_trace to happen. + accepted = _frequent_garbage_traces->store_trace(new_trace) || accepted; + + if (!accepted) { + // No one wanted to use it. + delete new_trace; + } + + _stats.garbage_collected_samples++; +} + +// Delegate the initialization question to the underlying storage system. +bool HeapMonitoring::initialized() { + return StackTraceStorage::storage()->initialized(); +} + +// Delegate the initialization question to the underlying storage system. 
+bool *HeapMonitoring::initialized_address() {
+ return
+ const_cast<bool*>(StackTraceStorage::storage()->initialized_address());
+}
+
+void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) {
+ StackTraceStorage::storage()->get_all_stack_traces(traces);
+}
+
+void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) {
+ const jvmtiHeapSamplingStats& internal_stats =
+ StackTraceStorage::storage()->get_heap_sampling_stats();
+ *stats = internal_stats;
+}
+
+void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) {
+ StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
+}
+
+void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) {
+ StackTraceStorage::storage()->get_garbage_stack_traces(traces);
+}
+
+void HeapMonitoring::release_traces(jvmtiStackTraces *traces) {
+ jint trace_count = traces->trace_count;
+ jvmtiStackTrace *stack_traces = traces->stack_traces;
+
+ for (jint i = 0; i < trace_count; i++) {
+ jvmtiStackTrace *current_trace = stack_traces + i;
+ FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
+ }
+
+ FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
+ traces->trace_count = 0;
+ traces->stack_traces = NULL;
+}
+
+// Invoked by the GC to clean up old stack traces and remove old arrays
+// of instrumentation that are still lying around.
+size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive,
+ OopClosure *f) {
+ assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+ return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
+}
+
+void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {
+ // Ignore if already enabled.
+ if (_enabled) {
+ return;
+ }
+
+ _monitoring_rate = monitoring_rate;
+
+ // Initialize and reset.
+ StackTraceStorage::initialize_stack_trace_storage(max_storage);
+
+ // Populate the lookup table for fast_log2.
+ // This approximates the log2 curve with a step function. 
+ // Steps have height equal to log2 of the mid-point of the step.
+ for (int i = 0; i < (1 << FastLogNumBits); i++) {
+ double half_way = static_cast<double>(i + 0.5);
+ _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0));
+ }
+
+ JavaThread *t = static_cast<JavaThread*>(Thread::current());
+ _rnd = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(t));
+ if (_rnd == 0) {
+ _rnd = 1;
+ }
+ _enabled = true;
+}
+
+void HeapMonitoring::stop_profiling() {
+ _enabled = false;
+}
+
+// Generates a geometric variable with the specified mean (512K by default).
+// This is done by generating a random number between 0 and 1 and applying
+// the inverse cumulative distribution function for an exponential.
+// Specifically: Let m be the inverse of the sample rate, then
+// the probability distribution function is m*exp(-mx) so the CDF is
+// p = 1 - exp(-mx), so
+// q = 1 - p = exp(-mx)
+// log_e(q) = -mx
+// -log_e(q)/m = x
+// log_2(q) * (-log_e(2) * 1/m) = x
+// In the code, q is actually in the range 1 to 2**26, hence the -26 below
+void HeapMonitoring::pick_next_sample(size_t *ptr) {
+ _rnd = next_random(_rnd);
+ // Take the top 26 bits as the random number
+ // (This plus a 1<<58 sampling bound gives a max possible step of
+ // 5194297183973780480 bytes. In this case,
+ // for sample_parameter = 1<<19, max possible step is
+ // 9448372 bytes (24 bits).
+ const uint64_t prng_mod_power = 48; // Number of bits in prng
+ // The uint32_t cast is to prevent a (hard-to-reproduce) NAN
+ // under piii debug for some binaries.
+ double q = static_cast<double>(_rnd >> (prng_mod_power - 26)) + 1.0;
+ // Put the computed p-value through the CDF of a geometric.
+ // For faster performance (save ~1/20th exec time), replace
+ // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705)
+ // The value 26.000705 is used rather than 26 to compensate
+ // for inaccuracies in FastLog2 which otherwise result in a
+ // negative answer.
+ double log_val = (fast_log2(q) - 26);
+ size_t rate = static_cast<size_t>(
+ (0.0 < log_val ? 
0.0 : log_val) * (-log(2.0) * (_monitoring_rate)) + 1);
+ *ptr = rate;
+
+ StackTraceStorage::storage()->accumulate_sample_rate(rate);
+}
+
+void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
+#if defined(X86) || defined(PPC)
+ JavaThread *thread = static_cast<JavaThread*>(t);
+ if (StackTraceStorage::storage()->is_initialized()) {
+ assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
+ JavaThread *thread = static_cast<JavaThread*>(t);
+
+ jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
+ if (trace == NULL) {
+ return;
+ }
+
+ jvmtiFrameInfo *frames =
+ NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
+
+ if (frames == NULL) {
+ FREE_C_HEAP_OBJ(trace);
+ return;
+ }
+
+ trace->frames = frames;
+ trace->thread_id = SharedRuntime::get_java_tid(thread);
+ trace->size = byte_size;
+ trace->frame_count = 0;
+
+ if (thread->has_last_Java_frame()) { // just to be safe
+ vframeStream vfst(thread, true);
+ int count = 0;
+ while (!vfst.at_end() && count < MaxStackDepth) {
+ Method* m = vfst.method();
+ frames[count].location = vfst.bci();
+ frames[count].method = m->jmethod_id();
+ count++;
+
+ vfst.next();
+ }
+ trace->frame_count = count;
+ }
+
+ if (trace->frame_count > 0) {
+ // Success!
+ StackTraceStorage::storage()->add_trace(trace, o);
+ return;
+ }
+
+ // Failure!
+ FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
+ FREE_C_HEAP_OBJ(trace);
+ return;
+ } else {
+ // There is something like 64K worth of allocation before the VM
+ // initializes. This is just in the interests of not slowing down
+ // startup.
+ assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
+ }
+#else
+ Unimplemented();
+#endif
+}
diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp
new file mode 100644
--- /dev/null
+++ b/src/hotspot/share/runtime/heapMonitoring.hpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP +#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP + +#include "gc/shared/referenceProcessor.hpp" +#include "runtime/sharedRuntime.hpp" + +// Support class for sampling heap allocations across the VM. 
+class HeapMonitoring : AllStatic {
+ private:
+ // Cheap random number generator
+ static uint64_t _rnd;
+ static bool _initialized;
+ static jint _monitoring_rate;
+ static bool _enabled;
+
+ // Statics for the fast log
+ static const int FastLogNumBits = 10;
+ static const int FastLogMask = (1 << FastLogNumBits) - 1;
+ static double _log_table[1<<FastLogNumBits]; // Constant
+ static AlwaysTrueClosure _always_true;
+
+ // Returns the next pseudo-random value from a cheap linear congruential
+ // generator (same multiplier/increment as lrand48: aX+b mod 2^48).
+ static inline uint64_t next_random(uint64_t rnd) {
+ const uint64_t PrngMult = 0x5DEECE66DLL;
+ const uint64_t prng_add = 0xB;
+ const uint64_t prng_mod_power = 48;
+ const uint64_t prng_mod_mask =
+ ~((~static_cast<uint64_t>(0)) << prng_mod_power);
+ return (PrngMult * rnd + prng_add) & prng_mod_mask;
+ }
+
+ static inline double fast_log2(const double & d) {
+ assert(d>0, "bad value passed to assert");
+ uint64_t x = 0;
+ memcpy(&x, &d, sizeof(uint64_t));
+ const uint32_t x_high = x >> 32;
+ const uint32_t y = x_high >> (20 - FastLogNumBits) & FastLogMask;
+ const int32_t exponent = ((x_high >> 20) & 0x7FF) - 1023;
+ return exponent + _log_table[y];
+ }
+
+ public:
+ /*
+ * General note: currently none of these methods are deemed thread-safe.
+ */
+
+ // First method called by user to start the profiler:
+ // - Note: the lower the monitoring rate, the higher the overhead incurred.
+ static void initialize_profiling(jint monitoring_rate, jint max_storage);
+
+ // Pick the next sample for a given size_t pointer using a geometric variable
+ // with specified mean. The specified mean is provided via the
+ // initialize_profiling method.
+ static void pick_next_sample(size_t *ptr);
+
+ // Get live/garbage traces and provide a method to release the traces.
+ static void get_live_traces(jvmtiStackTraces* stack_traces);
+ static void get_garbage_traces(jvmtiStackTraces* stack_traces);
+ static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces);
+ static void release_traces(jvmtiStackTraces *trace_info);
+
+ static void get_sampling_statistics(jvmtiHeapSamplingStats* stats);
+ static void stop_profiling();
+
+ // Is the profiler initialized and where is the address to the initialized
+ // boolean. 
+ static bool initialized(); + static bool *initialized_address(); + + // Called when o is to be sampled from a given thread and a given size. + static void object_alloc_do_sample(Thread *t, oopDesc *o, intx size_in_bytes); + + // Called to clean up oops that have been saved by our sampling function, + // but which no longer have other references in the heap. + static size_t weak_oops_do(BoolObjectClosure* is_alive, + OopClosure *f); + static size_t weak_oops_do(OopClosure* oop_closure) { + return weak_oops_do(&_always_true, oop_closure); + } + + static bool enabled() { + return _enabled; + } +}; + +#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/Frame.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/Frame.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/Frame.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +class Frame { + Frame(String method, String signature, String fileName, int lineNumber) { + this.method = method; + this.signature = signature; + this.fileName = fileName; + this.lineNumber = lineNumber; + } + + public String method; + public String signature; + public String fileName; + public int lineNumber; +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Checks the frequent garbage storage system. + * @build Frame + * @compile HeapMonitorFrequentTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorFrequentTest + */ + +import java.io.PrintStream; + +public class HeapMonitorFrequentTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkFrequentFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void runner(int max) { + int sum = 0; + for (int j = 0; j < max; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 60); + frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 71); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 85); + + enableSampling(); + // We are testing for the recent garbage sampler: + // First run for 10000 iterations to fill up the garbage sampler. + runner(10000); + + // Now because we are in a different stack frame line here, we can just re-use the same runner. + // Run for 3, we really should not see that many of these and most should be the first type. + runner(5000); + + // Both types should exist in frequent since it was frequent enough. 
+ int status = checkFrequentFrames(frames); + if (status == 0) { + throw new RuntimeException("Old frames no longer exist"); + } + + // Change the last frame only since the rest is identical. + frames[2].lineNumber = 89; + + status = checkFrequentFrames(frames); + if (status == 0) { + throw new RuntimeException("New frames not in the frequent sampling list"); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor API + * @build Frame + * @compile HeapMonitorNoCapabilityTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorNoCapabilityTest + */ + +import java.io.PrintStream; + +public class HeapMonitorNoCapabilityTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int allSamplingMethodsFail(); + + public static void main(String[] args) { + + int result = allSamplingMethodsFail(); + + if (result == 0) { + throw new RuntimeException("Some methods could be called without a capability."); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies if turning on/off/on the monitor wipes out the information. + * @build Frame + * @compile HeapMonitorOnOffTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorOnOffTest + */ + +import java.io.PrintStream; + +public class HeapMonitorOnOffTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. 
+ g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + native static int checkFrames(Frame[] frames); + native static int checkWipeOut(Frame[] frames); + native static int enableSampling(); + native static int disableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorOnOffTest.java", 53); + frames[1] = new Frame("wrapper", "()V", "HeapMonitorOnOffTest.java", 64); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorOnOffTest.java", 86); + + // Enable sampling and allocate. + enableSampling(); + wrapper(); + + // Now disable and re-enable. + disableSampling(); + + // Check that the data is still there: this allows to peruse samples after profiling. + int status = checkFrames(frames); + if (status != 0) { + throw new RuntimeException("Failed to find the traces before the wipe out."); + } + + // Enabling the sampling should wipe everything out. + enableSampling(); + + status = checkWipeOut(frames); + if (status != 0) { + throw new RuntimeException("Failed to wipe out the information."); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Checks the Recent garbage storage system. + * @build Frame + * @compile HeapMonitorRecentTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorRecentTest + */ + +import java.io.PrintStream; + +public class HeapMonitorRecentTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkLiveOrRecentFrames(Frame[] frames); + native static int checkLiveAndRecentFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. 
+ g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void runner(int max) { + int sum = 0; + for (int j = 0; j < max; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 61); + frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 72); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 86); + + enableSampling(); + // We are testing for the recent garbage sampler: + // First run for 10000 iterations to fill up the garbage sampler. + runner(10000); + + // Now because we are in a different stack frame line here, we can just re-use the same runner. + // Run for 3, we really should not see that many of these and most should be the first type. + runner(5000); + + // We should no longer have the initial frames. + int status = checkLiveOrRecentFrames(frames); + if (status != 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + + // Change the last frame only since the rest is identical. + frames[2].lineNumber = 90; + + // We should see those new frames. + status = checkLiveAndRecentFrames(frames); + if (status == 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics + * @build Frame + * @compile HeapMonitorStatCorrectnessTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatCorrectnessTest + */ + +import java.io.PrintStream; + +public class HeapMonitorStatCorrectnessTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + // Do 100000 iterations and expect maxIteration / multiplier samples. 
+ public static final int maxIteration = 100000; + public static int array[]; + + native static int statsNull(); + native static int statsHaveSamples(int expected, int percentError); + native static int enableSampling(int rate); + native static int disableSampling(); + + + private static void allocate(int size) { + System.out.println("With a size of " + size + ", execute " + maxIteration + " iterations"); + for (int j = 0; j < maxIteration; j++) { + array = new int[size]; + } + } + + public static void main(String[] args) { + int sizes[] = {1000, 10000, 100000}; + + for (int i = 0; i < sizes.length; i++) { + int currentSize = sizes[i]; + System.out.println("Testing size " + currentSize); + + // 111 is as good a number as any. + final int samplingMultiplier = 111; + enableSampling(samplingMultiplier * currentSize); + + if (statsNull() == 0) { + throw new RuntimeException("Statistics should be null to begin with."); + } + + allocate(currentSize); + + // For simplifications, we ignore the array memory usage for array internals (with the array + // sizes requested, it should be a negligible oversight). + // + // That means that with maxIterations, the loop in the method allocate requests: + // maxIterations * currentSize * 4 bytes (4 for integers) + // + // Via the enable sampling, the code requests a sample every samplingMultiplier * currentSize bytes. + // + // Therefore, the expected sample number is: + // (maxIterations * currentSize * 4) / (samplingMultiplier * currentSize); + double expected = maxIteration; + expected *= 4; + expected /= samplingMultiplier; + + // 10% error ensures a sanity test without becoming flaky. 
+ if (statsHaveSamples((int) expected, 10) != 0) { + throw new RuntimeException("Statistics should show about " + expected + " samples."); + } + + disableSampling(); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics + * @build Frame + * @compile HeapMonitorStatSimpleTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatSimpleTest + */ + +import java.io.PrintStream; + +public class HeapMonitorStatSimpleTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int statsNull(); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + if (statsNull() == 0) { + throw new RuntimeException("Statistics should be null to begin with."); + } + + enableSampling(); + wrapper(); + + if (statsNull() != 0) { + throw new RuntimeException("Statistics should not be null now."); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor API + * @build Frame + * @compile HeapMonitorTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorTest + */ + +import java.io.PrintStream; + +public class HeapMonitorTest { + + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + + System.getProperty("java.library.path")); + throw ule; + } + } + + native static int checkFrames(Frame[] frames); + native static int enableSampling(); + + public static int cnt; + public static int g_tmp[]; + public int array[]; + + public static int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. 
+ g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + public static void wrapper() { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += helper(); + } + System.out.println(sum); + } + + public static void main(String[] args) { + Frame[] frames = new Frame[3]; + frames[0] = new Frame("helper", "()I", "HeapMonitorTest.java", 60); + frames[1] = new Frame("wrapper", "()V", "HeapMonitorTest.java", 71); + frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorTest.java", 83); + + enableSampling(); + wrapper(); + + int status = checkFrames(frames); + if (status != 0) { + throw new RuntimeException("Non-zero status returned from the agent: " + status); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -0,0 +1,693 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
+
+#define PASSED 0
+#define FAILED 2
+
+#define MAX_TRACES 400
+
+static const char *EXC_CNAME = "java/lang/Exception";
+static jvmtiEnv *jvmti = NULL;
+
+static int check_error(jvmtiError err, const char* s) {
+  if (err != JVMTI_ERROR_NONE) {
+    printf("  ## %s error: %d\n", s, err);
+    return 1;
+  }
+  return 0;
+}
+
+static int check_capability_error(jvmtiError err, const char* s) {
+  if (err != JVMTI_ERROR_NONE) {
+    if (err == JVMTI_ERROR_MUST_POSSESS_CAPABILITY) {
+      return 0;
+    }
+    printf("  ## %s error: %d\n", s, err);
+    return 1;
+  }
+  return 1;
+}
+
+static
+jint throw_exc(JNIEnv *env, char *msg) {
+  jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME));
+
+  if (exc_class == NULL) {
+    printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME);
+    return -1;
+  }
+  return JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg);
+}
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+JNIEXPORT
+jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+  return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+  return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+  return JNI_VERSION_1_8;
+}
+
+JNIEXPORT void JNICALL OnVMInit(jvmtiEnv *jvmti, JNIEnv *jni_env, jthread thread) {
+}
+
+JNIEXPORT void JNICALL OnClassLoad(jvmtiEnv *jvmti_env, JNIEnv *jni_env,
+                                   jthread thread, jclass klass) {
+  // NOP.
+} + +JNIEXPORT void JNICALL OnClassPrepare(jvmtiEnv *jvmti_env, JNIEnv *jni_env, + jthread thread, jclass klass) { + // We need to do this to "prime the pump", as it were -- make sure + // that all of the methodIDs have been initialized internally, for + // AsyncGetCallTrace. + jint method_count; + jmethodID *methods = 0; + jvmtiError err = (*jvmti)->GetClassMethods(jvmti, klass, &method_count, &methods); + if ((err != JVMTI_ERROR_NONE) && (err != JVMTI_ERROR_CLASS_NOT_PREPARED)) { + // JVMTI_ERROR_CLASS_NOT_PREPARED is okay because some classes may + // be loaded but not prepared at this point. + throw_exc(jni_env, "Failed to create method IDs for methods in class\n"); + } +} + +static +jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) { + jint res; + + res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti), + JVMTI_VERSION_9); + if (res != JNI_OK || jvmti == NULL) { + printf(" Error: wrong result of a valid call to GetEnv!\n"); + return JNI_ERR; + } + + jvmtiEventCallbacks callbacks; + memset(&callbacks, 0, sizeof(callbacks)); + + callbacks.VMInit = &OnVMInit; + callbacks.ClassLoad = &OnClassLoad; + callbacks.ClassPrepare = &OnClassPrepare; + + jvmtiCapabilities caps; + memset(&caps, 0, sizeof(caps)); + // Get line numbers, sample heap, and filename for the test. 
+ caps.can_get_line_numbers = 1; + caps.can_sample_heap= 1; + caps.can_get_source_file_name = 1; + if (check_error((*jvmti)->AddCapabilities(jvmti, &caps), + "Add capabilities\n")){ + return JNI_ERR; + } + + if (check_error((*jvmti)->SetEventCallbacks(jvmti, &callbacks, + sizeof(jvmtiEventCallbacks)), + " Set Event Callbacks")) { + return JNI_ERR; + } + if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_VM_INIT, NULL), + "Set Event for VM Init")) { + return JNI_ERR; + } + if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_CLASS_LOAD, NULL), + "Set Event for Class Load")) { + return JNI_ERR; + } + if (check_error( (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, + JVMTI_EVENT_CLASS_PREPARE, NULL), + "Set Event for Class Prepare")) { + return JNI_ERR; + } + + return JNI_OK; +} + +// Given a method and a location, this method gets the line number. +// Kind of expensive, comparatively. +static +jint get_line_number(jvmtiEnv *jvmti, jmethodID method, jlocation location) { + // The location is -1 if the bci isn't known or -3 for a native method. + if (location == -1 || location == -3) { + return -1; + } + + // Read the line number table. + jvmtiLineNumberEntry *table_ptr = 0; + jint line_number_table_entries; + int jvmti_error = (*jvmti)->GetLineNumberTable(jvmti, method, + &line_number_table_entries, + &table_ptr); + + if (JVMTI_ERROR_NONE != jvmti_error) { + return -1; + } + if (line_number_table_entries <= 0) { + return -1; + } + if (line_number_table_entries == 1) { + return table_ptr[0].line_number; + } + + // Go through all the line numbers... + jint last_location = table_ptr[0].start_location; + int l; + for (l = 1; l < line_number_table_entries; l++) { + // ... and if you see one that is in the right place for your + // location, you've found the line number! 
+ if ((location < table_ptr[l].start_location) && + (location >= last_location)) { + return table_ptr[l - 1].line_number; + } + last_location = table_ptr[l].start_location; + } + + if (location >= last_location) { + return table_ptr[line_number_table_entries - 1].line_number; + } else { + return -1; + } +} + +typedef struct _ExpectedContentFrame { + const char *name; + const char *signature; + const char *file_name; + int line_number; +} ExpectedContentFrame; + +static jint check_sample_content(JNIEnv *env, + jvmtiStackTrace *trace, + ExpectedContentFrame *expected, + int expected_count) { + int i; + + if (expected_count > trace->frame_count) { + return 0; + } + + for (i = 0; i < expected_count; i++) { + // Get basic information out of the trace. + int bci = trace->frames[i].location; + jmethodID methodid = trace->frames[i].method; + char *name = NULL, *signature = NULL, *file_name = NULL; + + if (bci < 0) { + return 0; + } + + // Transform into usable information. + int line_number = get_line_number(jvmti, methodid, bci); + (*jvmti)->GetMethodName(jvmti, methodid, &name, &signature, 0); + + jclass declaring_class; + if (JVMTI_ERROR_NONE != + (*jvmti)->GetMethodDeclaringClass(jvmti, methodid, &declaring_class)) { + return 0; + } + + jvmtiError err = (*jvmti)->GetSourceFileName(jvmti, declaring_class, + &file_name); + if (err != JVMTI_ERROR_NONE) { + return 0; + } + + // Compare now, none should be NULL. 
+ if (name == NULL) { + return 0; + } + + if (file_name == NULL) { + return 0; + } + + if (signature == NULL) { + return 0; + } + + if (strcmp(name, expected[i].name) || + strcmp(signature, expected[i].signature) || + strcmp(file_name, expected[i].file_name) || + line_number != expected[i].line_number) { + return 0; + } + } + + return 1; +} + +static jint compare_samples(JNIEnv* env, jvmtiStackTrace* traces, int trace_count, + ExpectedContentFrame* expected_content, size_t size) { + // We expect the code to record correctly the bci, retrieve the line + // number, have the right method and the class name of the first frames. + int i; + for (i = 0; i < trace_count; i++) { + jvmtiStackTrace *trace = traces + i; + if (check_sample_content(env, trace, expected_content, size)) { + // At least one frame matched what we were looking for. + return 1; + } + } + + return 0; +} + +static jint check_samples(JNIEnv* env, ExpectedContentFrame* expected, + size_t size, + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*)) { + jvmtiStackTraces traces; + jvmtiError error = get_traces(jvmti, &traces); + + if (error != JVMTI_ERROR_NONE) { + return 0; + } + + int result = compare_samples(env, traces.stack_traces, traces.trace_count, + expected, size); + (*jvmti)->ReleaseTraces(jvmti, &traces); + return result; +} + +static jint frames_exist_live(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetLiveTraces); +} + +static jint frames_exist_recent(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetGarbageTraces); +} + +static jint frames_exist_frequent(JNIEnv* env, ExpectedContentFrame* expected, + size_t size) { + return check_samples(env, expected, size, (*jvmti)->GetFrequentGarbageTraces); +} + +// Static native API for various tests. 
+static void fill_native_frames(JNIEnv* env, jobjectArray frames, + ExpectedContentFrame* native_frames, size_t size) { + size_t i; + for(i = 0; i < size; i++) { + jobject obj = (*env)->GetObjectArrayElement(env, frames, i); + jclass frame_class = (*env)->GetObjectClass(env, obj); + jfieldID line_number_field_id = (*env)->GetFieldID(env, frame_class, "lineNumber", "I"); + int line_number = (*env)->GetIntField(env, obj, line_number_field_id); + + jfieldID string_id = (*env)->GetFieldID(env, frame_class, "method", "Ljava/lang/String;"); + jstring string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* method = (*env)->GetStringUTFChars(env, string_object, 0); + + string_id = (*env)->GetFieldID(env, frame_class, "fileName", "Ljava/lang/String;"); + string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* file_name = (*env)->GetStringUTFChars(env, string_object, 0); + + string_id = (*env)->GetFieldID(env, frame_class, "signature", "Ljava/lang/String;"); + string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + const char* signature= (*env)->GetStringUTFChars(env, string_object, 0); + + native_frames[i].name = method; + native_frames[i].file_name = file_name; + native_frames[i].signature = signature; + native_frames[i].line_number = line_number; + } +} + +static jint checkAnd(JNIEnv *env, jobjectArray frames, int live, int recent, + int frequent) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + int result = 1; + + if (live) { + result = frames_exist_live(env, native_frames, size); + } + + if (recent) { + result = result && + frames_exist_recent(env, native_frames, size); + } + + if (frequent) { + result = result && + frames_exist_frequent(env, native_frames, size); + } + + return result; +} + +static jint checkOr(JNIEnv *env, jobjectArray frames, int live, int recent, + int frequent) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. + jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + int result = 0; + + if (live) { + result = frames_exist_live(env, native_frames, size); + } + + if (recent) { + result = result || + frames_exist_recent(env, native_frames, size); + } + + if (frequent) { + result = result || + frames_exist_frequent(env, native_frames, size); + } + + return result; +} + +static jint checkAll(JNIEnv *env, jobjectArray frames) { + return checkAnd(env, frames, 1, 1, 1); +} + +static jint checkNone(JNIEnv *env, jobjectArray frames) { + jobject loader = NULL; + + if (frames == NULL) { + return 0; + } + + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return 0; + } + + if ((!frames_exist_live(env, native_frames, size)) && + (!frames_exist_recent(env, native_frames, size)) && + (!frames_exist_frequent(env, native_frames, size))) { + return 1; + } + return 0; +} + +static void enable_sampling() { + check_error((*jvmti)->StartHeapSampling(jvmti, 1 << 19, MAX_TRACES), + "Start Heap Sampling"); +} + +static void enable_sampling_with_rate(int rate) { + check_error((*jvmti)->StartHeapSampling(jvmti, rate, MAX_TRACES), + "Start Heap Sampling"); +} + +static void disable_sampling() { + check_error((*jvmti)->StopHeapSampling(jvmti), "Stop Heap Sampling"); +} + +// HeapMonitorTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +// HeapMonitorOnOffTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorOnOffTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorOnOffTest_checkWipeOut(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in none of the parts. 
+ if (!checkNone(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorOnOffTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorOnOffTest_disableSampling(JNIEnv *env, jclass cls) { + disable_sampling(); +} + +// HeapMonitorRecentTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. + if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkLiveOrRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkOr(env, frames, 1, 1, 0)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorRecentTest_checkLiveAndRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkAnd(env, frames, 1, 1, 0)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorRecentTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +// HeapMonitorFrequentTest JNI. +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorFrequentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + // We want the frames in each part. 
+ if (!checkAll(env, frames)) { + return FAILED; + } + return PASSED; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorFrequentTest_checkFrequentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { + if (checkAnd(env, frames, 0, 0, 1)) { + return PASSED; + } + return FAILED; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorFrequentTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorNoCapabilityTest_allSamplingMethodsFail(JNIEnv *env, jclass cls) { + jvmtiCapabilities caps; + memset(&caps, 0, sizeof(caps)); + caps.can_sample_heap= 1; + if (check_error((*jvmti)->RelinquishCapabilities(jvmti, &caps), + "Add capabilities\n")){ + return 0; + } + + if (check_capability_error((*jvmti)->StartHeapSampling(jvmti, 1<<19, + MAX_TRACES), + "Start Heap Sampling")) { + return 0; + } + + if (check_capability_error((*jvmti)->StopHeapSampling(jvmti), + "Stop Heap Sampling")) { + return 0; + } + + if (check_capability_error((*jvmti)->ReleaseTraces(jvmti, NULL), + "Release Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetHeapSamplingStats(jvmti, NULL), + "Get Heap Sampling Stats")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL), + "Get Garbage Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL), + "Get Frequent Garbage Traces")) { + return 0; + } + + if (check_capability_error((*jvmti)->GetLiveTraces(jvmti, NULL), + "Get Live Traces")) { + return 0; + } + + // Calling enable sampling should fail now. 
+ return 1; +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatSimpleTest_enableSampling(JNIEnv *env, jclass cls) { + enable_sampling(); +} + +static jint stats_are_zero() { + jvmtiHeapSamplingStats stats; + check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), + "Heap Sampling Statistics"); + + jvmtiHeapSamplingStats zero; + memset(&zero, 0, sizeof(zero)); + return memcmp(&stats, &zero, sizeof(zero)) == 0; +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorStatSimpleTest_statsNull(JNIEnv *env, jclass cls) { + return stats_are_zero(); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_disableSampling(JNIEnv *env, jclass cls) { + disable_sampling(); +} + +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_enableSampling(JNIEnv *env, jclass cls, jint rate) { + enable_sampling_with_rate(rate); +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_statsNull(JNIEnv *env, jclass cls) { + return stats_are_zero(); +} + +JNIEXPORT jint JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_statsHaveSamples(JNIEnv *env, + jclass cls, + int expected, + int percent_error) { + jvmtiHeapSamplingStats stats; + check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), + "Heap Sampling Statistics"); + + fprintf(stderr, "Statistics show:\n"); + fprintf(stderr, "\tCollected samples: %ld\n\tGarbage collected samples: %ld\n", + stats.sample_count, stats.garbage_collected_samples); + fprintf(stderr, "\tSample rate accumulated: %ld\n\tSample Rate Count: %ld\n", + stats.sample_rate_accumulation, stats.sample_rate_count); + fprintf(stderr, "\tStack depth accumulation: %ld\n", + stats.stack_depth_accumulation); + + fprintf(stderr, "Expected is %d\n", expected); + double diff_ratio = (stats.sample_count - expected); + diff_ratio = (diff_ratio < 0) ? 
-diff_ratio : diff_ratio; + diff_ratio /= expected; + + fprintf(stderr, "Diff ratio is %f\n", diff_ratio); + + return diff_ratio * 100 > percent_error; +} + +#ifdef __cplusplus +} +#endif # HG changeset patch # User Jean Christophe Beyler # Date 1509401947 25200 # Mon Oct 30 15:19:07 2017 -0700 # Node ID 2ee404f35143ccee32b1d6fb41c34c227a86f4e0 # Parent de50da53f850e083ccce842ed4c719b56470c4d4 [mq]: heap10a diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -290,45 +290,43 @@ } #endif -HeapWord* CollectedHeap::handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size) { - // We can come here for three reasons: - // - We either really did fill the tlab. - // - We pretended to everyone we did and we want to sample. - // - Both of the above reasons are true at the same time. - if (HeapMonitoring::enabled()) { - if (thread->tlab().should_sample()) { - HeapWord *end = thread->tlab().end(); - thread->tlab().set_back_actual_end(); - // If we don't have an object yet, try to allocate it. - if (obj == NULL) { - // The tlab could still have space after this sample. - obj = thread->tlab().allocate(size); - } +void CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj, + size_t size, size_t fix_sample_rate) { + // Object is allocated, sample it now. + HeapMonitoring::object_alloc_do_sample(thread, + reinterpret_cast(obj), + size * HeapWordSize); + // Pick a next sample in this case, we allocated right. + thread->tlab().pick_next_sample(fix_sample_rate); +} - // Is the object allocated now? - // If not, this means we have to wait till a new TLAB, let the subsequent - // call to handle_heap_sampling pick the next sample. - if (obj != NULL) { - // Object is allocated, sample it now. 
- HeapMonitoring::object_alloc_do_sample(thread, - reinterpret_cast(obj), - size * HeapWordSize); - // Pick a next sample in this case, we allocated right. - thread->tlab().pick_next_sample(thread->tlab().top() - end); - } - } - } +HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) { + thread->tlab().set_back_actual_end(); - return obj; + // The tlab could still have space after this sample. + return thread->tlab().allocate(size); } HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) { - HeapWord* obj = handle_heap_sampling(thread, NULL, size); - bool should_sample = thread->tlab().should_sample(); + // In case the tlab changes, remember if this one wanted a sample. + bool should_sample = thread->tlab().should_sample() && HeapMonitoring::enabled(); - if (obj != NULL) { - return obj; + HeapWord* obj = NULL; + if (should_sample) { + // Remember the tlab end to fix up the sampling rate. + HeapWord *tlab_old_end = thread->tlab().end(); + obj = allocate_sampled_object(thread, size); + + // If we did allocate in this tlab, sample it. Otherwise, we wait for the + // new tlab's first allocation at the end of this method. + if (obj != NULL) { + // Fix sample rate by removing the extra bytes allocated in this last + // sample. + size_t fix_sample_rate = thread->tlab().top() - tlab_old_end; + sample_allocation(thread, obj, size, fix_sample_rate); + return obj; + } } // Retain tlab and allocate object in shared space if @@ -371,11 +369,11 @@ } thread->tlab().fill(obj, obj + size, new_tlab_size); + // Did we initially want to sample? 
if (should_sample) { - return handle_heap_sampling(thread, obj, size); - } else { - return obj; + sample_allocation(thread, obj, size); } + return obj; } void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) { diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -149,8 +149,12 @@ inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size); static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size); - // Handle if needed heap sampling. - static HeapWord* handle_heap_sampling(Thread* thread, HeapWord* obj, size_t size); + // Sample the allocation via HeapMonitoring. + static void sample_allocation(Thread* thread, HeapWord* obj, size_t size, + size_t fix_sample_rate = 0); + // Try to allocate the object we want to sample in this tlab, returns NULL if + // fails to allocate. + static HeapWord* allocate_sampled_object(Thread* thread, size_t size); // Allocate an uninitialized block of the given size, or returns NULL if // this is impossible. diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ b/src/hotspot/share/prims/jvmti.xml @@ -11541,7 +11541,7 @@ - + jvmtiFrameInfo Pointer to the call frames. @@ -11647,10 +11647,10 @@ only be used when knowing it may impact performance. - + - The maximum storage used for the sampler. By default, the value is 200. + The maximum storage used for the GC samples in the sampler. By default, the value is 200. diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -2008,13 +2008,13 @@ // Start the sampler. 
jvmtiError -JvmtiEnv::StartHeapSampling(jint monitoring_rate, jint max_storage) { +JvmtiEnv::StartHeapSampling(jint monitoring_rate, jint max_gc_storage) { if (monitoring_rate < 0) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } HeapThreadTransition htt(Thread::current()); - HeapMonitoring::initialize_profiling(monitoring_rate, max_storage); + HeapMonitoring::initialize_profiling(monitoring_rate, max_gc_storage); return JVMTI_ERROR_NONE; } /* end StartHeapSampling */ diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -29,17 +29,7 @@ #include "runtime/heapMonitoring.hpp" #include "runtime/vframe.hpp" -// DONE: -// merged printouts -// broke up the one-liner -// talk about synchro -// cleaned up old entry points for C1/interpreter -// add statistics per GC and log start up initialization. -// removed the null pointer check during the weak_oops_do walk -// cleaned up the task_executor -// fixed the compilation using the option --disable-precompiled-header - -static const int MaxStackDepth = 64; +static const int MaxStackDepth = 1024; // Internal data structure representing traces. struct StackTraceData : CHeapObj { @@ -208,45 +198,33 @@ // Executes whenever weak references are traversed. is_alive tells // you if the given oop is still reachable and live. 
- size_t weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); + void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); ~StackTraceStorage(); StackTraceStorage(); static StackTraceStorage* storage() { - if (internal_storage == NULL) { - internal_storage = new StackTraceStorage(); - } - return internal_storage; + static StackTraceStorage internal_storage; + return &internal_storage; } - static void reset_stack_trace_storage() { - delete internal_storage; - internal_storage = NULL; - } - - bool is_initialized() { - return _initialized; + void initialize(int max_storage) { + MutexLocker mu(HeapMonitor_lock); + free_storage(); + allocate_storage(max_storage); + memset(&_stats, 0, sizeof(_stats)); } const jvmtiHeapSamplingStats& get_heap_sampling_stats() const { return _stats; } - // Static method to set the storage in place at initialization. - static void initialize_stack_trace_storage(int max_storage) { - reset_stack_trace_storage(); - StackTraceStorage *storage = StackTraceStorage::storage(); - storage->initialize_storage(max_storage); - } - void accumulate_sample_rate(size_t rate) { _stats.sample_rate_accumulation += rate; _stats.sample_rate_count++; } bool initialized() { return _initialized; } - volatile bool *initialized_address() { return &_initialized; } private: // The traces currently sampled. @@ -262,7 +240,7 @@ jvmtiHeapSamplingStats _stats; // Maximum amount of storage provided by the JVMTI call initialize_profiling. - int _max_storage; + int _max_gc_storage; static StackTraceStorage* internal_storage; volatile bool _initialized; @@ -298,9 +276,6 @@ int _size; }; - // Instance initialization. - void initialize_storage(int max_storage); - // Copies from StackTraceData to jvmtiStackTrace. 
bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from); @@ -311,6 +286,8 @@ void store_garbage_trace(const StackTraceData &trace); void free_garbage(); + void free_storage(); + void allocate_storage(int max_gc_storage); }; StackTraceStorage* StackTraceStorage::internal_storage; @@ -328,9 +305,8 @@ _allocated_traces(NULL), _recent_garbage_traces(NULL), _frequent_garbage_traces(NULL), - _max_storage(0), + _max_gc_storage(0), _initialized(false) { - memset(&_stats, 0, sizeof(_stats)); } void StackTraceStorage::free_garbage() { @@ -376,7 +352,7 @@ } } -StackTraceStorage::~StackTraceStorage() { +void StackTraceStorage::free_storage() { delete _allocated_traces; free_garbage(); @@ -385,7 +361,11 @@ _initialized = false; } -void StackTraceStorage::initialize_storage(int max_storage) { +StackTraceStorage::~StackTraceStorage() { + free_storage(); +} + +void StackTraceStorage::allocate_storage(int max_gc_storage) { // In case multiple threads got locked and then 1 by 1 got through. if (_initialized) { return; @@ -394,24 +374,26 @@ _allocated_traces = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(128, true); - _recent_garbage_traces = new MostRecentGarbageTraces(max_storage); - _frequent_garbage_traces = new FrequentGarbageTraces(max_storage); + _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage); + _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage); - _max_storage = max_storage; + _max_gc_storage = max_gc_storage; _initialized = true; } void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) { + MutexLocker mu(HeapMonitor_lock); StackTraceData new_data(trace, o); _stats.sample_count++; _stats.stack_depth_accumulation += trace->frame_count; _allocated_traces->append(new_data); } -size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, - OopClosure *f) { +void StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, + OopClosure *f) { + MutexLocker mu(HeapMonitor_lock); size_t count = 0; - if 
(is_initialized()) { + if (initialized()) { int len = _allocated_traces->length(); // Compact the oop traces. Moves the live oops to the beginning of the @@ -447,7 +429,7 @@ _allocated_traces->trunc_to(curr_pos); } - return count; + log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count); } bool StackTraceStorage::deep_copy(jvmtiStackTrace *to, @@ -456,7 +438,7 @@ *to = *src; to->frames = - NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); + NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal); if (to->frames == NULL) { return false; @@ -464,7 +446,7 @@ memcpy(to->frames, src->frames, - sizeof(jvmtiFrameInfo) * MaxStackDepth); + sizeof(jvmtiFrameInfo) * src->frame_count); return true; } @@ -494,6 +476,7 @@ void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, jvmtiStackTraces *traces) { + MutexLocker mu(HeapMonitor_lock); int len = copier.size(); // Create a new array to store the StackTraceData objects. @@ -542,17 +525,6 @@ _stats.garbage_collected_samples++; } -// Delegate the initialization question to the underlying storage system. -bool HeapMonitoring::initialized() { - return StackTraceStorage::storage()->initialized(); -} - -// Delegate the initialization question to the underlying storage system. -bool *HeapMonitoring::initialized_address() { - return - const_cast(StackTraceStorage::storage()->initialized_address()); -} - void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) { StackTraceStorage::storage()->get_all_stack_traces(traces); } @@ -587,13 +559,13 @@ // Invoked by the GC to clean up old stack traces and remove old arrays // of instrumentation that are still lying around. 
-size_t HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, - OopClosure *f) { +void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - return StackTraceStorage::storage()->weak_oops_do(is_alive, f); + StackTraceStorage::storage()->weak_oops_do(is_alive, f); } -void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) { +void HeapMonitoring::initialize_profiling(jint monitoring_rate, + jint max_gc_storage) { // Ignore if already enabled. if (_enabled) { return; @@ -601,9 +573,6 @@ _monitoring_rate = monitoring_rate; - // Initalize and reset. - StackTraceStorage::initialize_stack_trace_storage(max_storage); - // Populate the lookup table for fast_log2. // This approximates the log2 curve with a step function. // Steps have height equal to log2 of the mid-point of the step. @@ -617,6 +586,8 @@ if (_rnd == 0) { _rnd = 1; } + + StackTraceStorage::storage()->initialize(max_gc_storage); _enabled = true; } @@ -663,7 +634,7 @@ void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) { #if defined(X86) || defined(PPC) JavaThread *thread = static_cast(t); - if (StackTraceStorage::storage()->is_initialized()) { + if (StackTraceStorage::storage()->initialized()) { assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); JavaThread *thread = static_cast(t); @@ -708,12 +679,6 @@ // Failure! FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames); FREE_C_HEAP_OBJ(trace); - return; - } else { - // There is something like 64K worth of allocation before the VM - // initializes. This is just in the interests of not slowing down - // startup. 
- assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); } #else Unimplemented(); diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -33,7 +33,6 @@ private: // Cheap random number generator static uint64_t _rnd; - static bool _initialized; static jint _monitoring_rate; static bool _enabled; @@ -74,7 +73,7 @@ // First method called by user to start the profiler: // - Note: the lower the monitoring rate, the higher the overhead incurred. - static void initialize_profiling(jint monitoring_rate, jint max_storage); + static void initialize_profiling(jint monitoring_rate, jint max_gc_storage); // Pick the next sample for a given size_t pointer using a geometric variable // with specified mean. The specified mean is provided via the @@ -90,20 +89,14 @@ static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); static void stop_profiling(); - // Is the profiler initialized and where is the address to the initialized - // boolean. - static bool initialized(); - static bool *initialized_address(); - // Called when o is to be sampled from a given thread and a given size. static void object_alloc_do_sample(Thread *t, oopDesc *o, intx size_in_bytes); // Called to clean up oops that have been saved by our sampling function, // but which no longer have other references in the heap. 
- static size_t weak_oops_do(BoolObjectClosure* is_alive, - OopClosure *f); - static size_t weak_oops_do(OopClosure* oop_closure) { - return weak_oops_do(&_always_true, oop_closure); + static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); + static void weak_oops_do(OopClosure* oop_closure) { + weak_oops_do(&_always_true, oop_closure); } static bool enabled() { diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -127,6 +127,8 @@ Monitor* PeriodicTask_lock = NULL; Monitor* RedefineClasses_lock = NULL; +Monitor* HeapMonitor_lock = NULL; + #if INCLUDE_TRACE Mutex* JfrStacktrace_lock = NULL; Monitor* JfrMsg_lock = NULL; @@ -283,6 +285,9 @@ def(CompileThread_lock , PaddedMonitor, nonleaf+5, false, Monitor::_safepoint_check_always); def(PeriodicTask_lock , PaddedMonitor, nonleaf+5, true, Monitor::_safepoint_check_sometimes); def(RedefineClasses_lock , PaddedMonitor, nonleaf+5, true, Monitor::_safepoint_check_always); + + def(HeapMonitor_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_always); + if (WhiteBoxAPI) { def(Compilation_lock , PaddedMonitor, leaf, false, Monitor::_safepoint_check_never); } diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -126,6 +126,8 @@ extern Monitor* PeriodicTask_lock; // protects the periodic task structure extern Monitor* RedefineClasses_lock; // locks classes from parallel redefinition +extern Monitor* HeapMonitor_lock; // protects internal storage in HeapMonitoring + #if INCLUDE_TRACE extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table extern Monitor* JfrMsg_lock; // protects JFR messaging diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java 
b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +import java.util.ArrayList; +import java.util.List; + +/** API for handling the underlying heap sampling monitoring system. */ +public class HeapMonitor { + static { + try { + System.loadLibrary("HeapMonitor"); + } catch (UnsatisfiedLinkError ule) { + System.err.println("Could not load HeapMonitor library"); + System.err.println("java.library.path: " + System.getProperty("java.library.path")); + throw ule; + } + } + + private static int g_tmp[]; + + /** Enable heap monitoring sampling given a rate and maximum garbage to keep in memory. */ + public native static void enableSampling(int rate, int maximumGarbage); + + /** Enable heap monitoring sampling given a rate. 
*/ + public static void enableSampling(int rate) { + enableSampling(rate, 200); + } + + /** Enable heap monitoring sampling with default values for rate and maximum garbage. */ + public static void enableSampling() { + enableSampling(1 << 19); + } + + public native static void disableSampling(); + public native static boolean areSamplingStatisticsZero(); + + /** Do the frames provided exist in live, recent garbage, and frequent garbage. */ + public native static boolean framesExistEverywhere(Frame[] frames); + /** Do the frames provided not exist in live, recent garbage, and frequent garbage. */ + public native static boolean framesExistNowhere(Frame[] frames); + + /** + * Allocate memory but first create a stack trace of a particular depth. + * + * @return list of frames for the allocation. + */ + public static List allocate(int depth) { + List frames = new ArrayList(); + if (depth > 1) { + createStackDepth(depth - 1, frames); + frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 72)); + } else { + actuallyAllocate(frames); + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 118)); + frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 75)); + } + return frames; + } + + /** + * Allocate memory but first create a stack trace. + * + * @return list of frames for the allocation. 
+ */ + public static List allocate() { + int sum = 0; + List frames = new ArrayList(); + allocate(frames); + frames.add(new Frame("allocate", "()Ljava/util/List;", "HeapMonitor.java", 90)); + return frames; + } + + private static void createStackDepth(int depth, List frames) { + if (depth > 1) { + createStackDepth(depth - 1, frames); + frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 97)); + } else { + allocate(frames); + frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 100)); + } + } + + private static void allocate(List frames) { + int sum = 0; + for (int j = 0; j < 1000; j++) { + sum += actuallyAllocate(frames); + } + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 118)); + frames.add(new Frame("allocate", "(Ljava/util/List;)V", "HeapMonitor.java", 108)); + } + + private static int actuallyAllocate(List frames) { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java @@ -23,83 +23,52 @@ package MyPackage; +import java.util.List; + /** * @test * @summary Checks the frequent garbage storage system. 
- * @build Frame + * @build Frame HeapMonitor * @compile HeapMonitorFrequentTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorFrequentTest */ -import java.io.PrintStream; - public class HeapMonitorFrequentTest { - static { - try { - System.loadLibrary("HeapMonitor"); - } catch (UnsatisfiedLinkError ule) { - System.err.println("Could not load HeapMonitor library"); - System.err.println("java.library.path: " - + System.getProperty("java.library.path")); - throw ule; - } - } - - native static int checkFrequentFrames(Frame[] frames); - native static int enableSampling(); - - public static int cnt; - public static int g_tmp[]; - public int array[]; + private native static boolean framesExistInFrequent(Frame[] frames); - public static int helper() { - int sum = 0; - // Let us assume that the array is 24 bytes of memory. - for (int i = 0; i < 127000 / 6; i++) { - int tmp[] = new int[1]; - // Force it to be kept. - g_tmp = tmp; - sum += g_tmp[0]; + private static List runner(int max) { + List frameList = null; + for (int j = 0; j < max; j++) { + frameList = HeapMonitor.allocate(); } - return sum; - } - - public static void runner(int max) { - int sum = 0; - for (int j = 0; j < max; j++) { - sum += helper(); - } - System.out.println(sum); + frameList.add(new Frame("runner", "(I)Ljava/util/List;", "HeapMonitorFrequentTest.java", 43)); + return frameList; } public static void main(String[] args) { - Frame[] frames = new Frame[3]; - frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 60); - frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 71); - frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 85); - - enableSampling(); + HeapMonitor.enableSampling(); // We are testing for the recent garbage sampler: // First run for 10000 iterations to fill up the garbage sampler. 
- runner(10000); + List firstFrameList = runner(10); + firstFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorFrequentTest.java", + 53)); // Now because we are in a different stack frame line here, we can just re-use the same runner. // Run for 3, we really should not see that many of these and most should be the first type. - runner(5000); + List secondFrameList = runner(10); + secondFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorFrequentTest.java", + 59)); // Both types should exist in frequent since it was frequent enough. - int status = checkFrequentFrames(frames); - if (status == 0) { - throw new RuntimeException("Old frames no longer exist"); + boolean status = framesExistInFrequent(firstFrameList.toArray(new Frame[0])); + if (!status) { + throw new RuntimeException("Old frames no longer exist in the frequent list."); } - // Change the last frame only since the rest is identical. - frames[2].lineNumber = 89; - - status = checkFrequentFrames(frames); - if (status == 0) { - throw new RuntimeException("New frames not in the frequent sampling list"); + status = framesExistInFrequent(secondFrameList.toArray(new Frame[0])); + if (!status) { + throw new RuntimeException("New frames no longer exist in the frequent list"); } } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorNoCapabilityTest.java @@ -25,16 +25,12 @@ /** * @test - * @summary Verifies the JVMTI Heap Monitor API - * @build Frame + * @summary Verifies the JVMTI Heap Monitor does not work without the required capability. 
* @compile HeapMonitorNoCapabilityTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorNoCapabilityTest */ -import java.io.PrintStream; - public class HeapMonitorNoCapabilityTest { - static { try { System.loadLibrary("HeapMonitor"); @@ -46,10 +42,9 @@ } } - native static int allSamplingMethodsFail(); + private native static int allSamplingMethodsFail(); public static void main(String[] args) { - int result = allSamplingMethodsFail(); if (result == 0) { diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java @@ -23,82 +23,35 @@ package MyPackage; +import java.util.List; + /** * @test * @summary Verifies if turning on/off/on the monitor wipes out the information. - * @build Frame + * @build Frame HeapMonitor * @compile HeapMonitorOnOffTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorOnOffTest */ -import java.io.PrintStream; - public class HeapMonitorOnOffTest { - - static { - try { - System.loadLibrary("HeapMonitor"); - } catch (UnsatisfiedLinkError ule) { - System.err.println("Could not load HeapMonitor library"); - System.err.println("java.library.path: " - + System.getProperty("java.library.path")); - throw ule; - } - } - - public static int helper() { - int sum = 0; - // Let us assume that the array is 24 bytes of memory. - for (int i = 0; i < 127000 / 6; i++) { - int tmp[] = new int[1]; - // Force it to be kept. 
- g_tmp = tmp; - sum += g_tmp[0]; - } - return sum; - } - - public static void wrapper() { - int sum = 0; - for (int j = 0; j < 1000; j++) { - sum += helper(); - } - System.out.println(sum); - } - - native static int checkFrames(Frame[] frames); - native static int checkWipeOut(Frame[] frames); - native static int enableSampling(); - native static int disableSampling(); - - public static int cnt; - public static int g_tmp[]; - public int array[]; - public static void main(String[] args) { - Frame[] frames = new Frame[3]; - frames[0] = new Frame("helper", "()I", "HeapMonitorOnOffTest.java", 53); - frames[1] = new Frame("wrapper", "()V", "HeapMonitorOnOffTest.java", 64); - frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorOnOffTest.java", 86); - - // Enable sampling and allocate. - enableSampling(); - wrapper(); - - // Now disable and re-enable. - disableSampling(); + HeapMonitor.enableSampling(); + List frameList = HeapMonitor.allocate(); + frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorOnOffTest.java", 39)); + Frame[] frames = frameList.toArray(new Frame[0]); + HeapMonitor.disableSampling(); // Check that the data is still there: this allows to peruse samples after profiling. - int status = checkFrames(frames); - if (status != 0) { + boolean status = HeapMonitor.framesExistEverywhere(frames); + if (!status) { throw new RuntimeException("Failed to find the traces before the wipe out."); } // Enabling the sampling should wipe everything out. 
- enableSampling(); + HeapMonitor.enableSampling(); - status = checkWipeOut(frames); - if (status != 0) { + status = HeapMonitor.framesExistNowhere(frames); + if (!status) { throw new RuntimeException("Failed to wipe out the information."); } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java @@ -23,85 +23,52 @@ package MyPackage; +import java.util.List; + /** * @test * @summary Checks the Recent garbage storage system. - * @build Frame + * @build Frame HeapMonitor * @compile HeapMonitorRecentTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorRecentTest */ -import java.io.PrintStream; - public class HeapMonitorRecentTest { - static { - try { - System.loadLibrary("HeapMonitor"); - } catch (UnsatisfiedLinkError ule) { - System.err.println("Could not load HeapMonitor library"); - System.err.println("java.library.path: " - + System.getProperty("java.library.path")); - throw ule; - } - } - - native static int checkLiveOrRecentFrames(Frame[] frames); - native static int checkLiveAndRecentFrames(Frame[] frames); - native static int enableSampling(); + private native static boolean framesNotInLiveOrRecent(Frame[] frames); + private native static boolean framesExistInLiveAndRecent(Frame[] frames); - public static int cnt; - public static int g_tmp[]; - public int array[]; - - public static int helper() { - int sum = 0; - // Let us assume that the array is 24 bytes of memory. - for (int i = 0; i < 127000 / 6; i++) { - int tmp[] = new int[1]; - // Force it to be kept. 
- g_tmp = tmp; - sum += g_tmp[0]; + private static List runner(int max) { + List frameList = null; + for (int j = 0; j < max; j++) { + frameList = HeapMonitor.allocate(); } - return sum; - } - - public static void runner(int max) { - int sum = 0; - for (int j = 0; j < max; j++) { - sum += helper(); - } - System.out.println(sum); + frameList.add(new Frame("runner", "(I)Ljava/util/List;", "HeapMonitorRecentTest.java", 44)); + return frameList; } public static void main(String[] args) { - Frame[] frames = new Frame[3]; - frames[0] = new Frame("helper", "()I", "HeapMonitorRecentTest.java", 61); - frames[1] = new Frame("runner", "(I)V", "HeapMonitorRecentTest.java", 72); - frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 86); - - enableSampling(); + HeapMonitor.enableSampling(); // We are testing for the recent garbage sampler: // First run for 10000 iterations to fill up the garbage sampler. - runner(10000); + List firstFrameList = runner(10); + firstFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 54)); // Now because we are in a different stack frame line here, we can just re-use the same runner. // Run for 3, we really should not see that many of these and most should be the first type. - runner(5000); + List secondFrameList = runner(10); + secondFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 59)); // We should no longer have the initial frames. - int status = checkLiveOrRecentFrames(frames); - if (status != 0) { - throw new RuntimeException("Non-zero status returned from the agent: " + status); + boolean status = framesNotInLiveOrRecent(firstFrameList.toArray(new Frame[0])); + if (!status) { + throw new RuntimeException("Initial frames still internally there."); } - // Change the last frame only since the rest is identical. - frames[2].lineNumber = 90; - // We should see those new frames. 
- status = checkLiveAndRecentFrames(frames); - if (status == 0) { - throw new RuntimeException("Non-zero status returned from the agent: " + status); + status = framesExistInLiveAndRecent(secondFrameList.toArray(new Frame[0])); + if (!status) { + throw new RuntimeException("Second frame list not found in both live and recent."); } } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor stack depth handling. 
+ * @build Frame HeapMonitor
+ * @compile HeapMonitorStackDepthTest.java
+ * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStackDepthTest
+ */
+
+public class HeapMonitorStackDepthTest {
+ private native static double getAverageStackDepth();
+
+ private static double calculateErrorPercentage(double expected, double actual) {
+ double error = expected - actual;
+ error = error < 0 ? -error : error;
+ return error / expected * 100;
+ }
+
+ public static void main(String[] args) {
+ int[] depths = {10, 100, 500};
+
+ for (int depthIdx = 0; depthIdx < depths.length; depthIdx++) {
+ int depth = depths[depthIdx];
+
+ HeapMonitor.enableSampling();
+ HeapMonitor.allocate(depth);
+ HeapMonitor.disableSampling();
+
+ // baseDepth represents the helper method depth: main, finalWrapper, and helper.
+ // To get the requested depth, remove this from the count.
+ final int baseDepth = 3;
+ double averageDepth = getAverageStackDepth() - baseDepth;
+ double errorPercentage = calculateErrorPercentage(depth, averageDepth);
+
+ // 1% error should be close enough.
+ if (errorPercentage > 1) {
+ throw new RuntimeException("Stack depth average over 1% for depth " + depth + " : " + averageDepth + " , error: " + errorPercentage);
+ }
+ }
+
+ // Last test is 1024, which is the current maximum.
+ HeapMonitor.enableSampling();
+ final int maximumDepth = 1024;
+ HeapMonitor.allocate(1024);
+ HeapMonitor.disableSampling();
+ // Because of the extra frames, we should be at (maximumDepth + a few frames). Due to the
+ // maximum depth allowed, we hit it and so should still be at an average of 1024.
+ double averageDepth = getAverageStackDepth();
+ double errorPercentage = calculateErrorPercentage(maximumDepth, averageDepth);
+
+ // 1% error should be close enough. 
+ if (errorPercentage > 1) { + throw new RuntimeException("Stack depth average over 1% for depth 1024 : " + averageDepth + " , error: " + errorPercentage); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java @@ -25,39 +25,21 @@ /** * @test + * @build Frame HeapMonitor * @summary Verifies the JVMTI Heap Monitor Statistics - * @build Frame * @compile HeapMonitorStatCorrectnessTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatCorrectnessTest */ -import java.io.PrintStream; - public class HeapMonitorStatCorrectnessTest { - static { - try { - System.loadLibrary("HeapMonitor"); - } catch (UnsatisfiedLinkError ule) { - System.err.println("Could not load HeapMonitor library"); - System.err.println("java.library.path: " - + System.getProperty("java.library.path")); - throw ule; - } - } + // Do 100000 iterations and expect maxIteration / multiplier samples. + private static final int maxIteration = 100000; + private static int array[]; - // Do 100000 iterations and expect maxIteration / multiplier samples. 
- public static final int maxIteration = 100000; - public static int array[]; - - native static int statsNull(); - native static int statsHaveSamples(int expected, int percentError); - native static int enableSampling(int rate); - native static int disableSampling(); - + private native static boolean statsHaveExpectedNumberSamples(int expected, int percentError); private static void allocate(int size) { - System.out.println("With a size of " + size + ", execute " + maxIteration + " iterations"); for (int j = 0; j < maxIteration; j++) { array = new int[size]; } @@ -66,15 +48,14 @@ public static void main(String[] args) { int sizes[] = {1000, 10000, 100000}; - for (int i = 0; i < sizes.length; i++) { - int currentSize = sizes[i]; + for (int currentSize : sizes) { System.out.println("Testing size " + currentSize); // 111 is as good a number as any. final int samplingMultiplier = 111; - enableSampling(samplingMultiplier * currentSize); + HeapMonitor.enableSampling(samplingMultiplier * currentSize); - if (statsNull() == 0) { + if (!HeapMonitor.areSamplingStatisticsZero()) { throw new RuntimeException("Statistics should be null to begin with."); } @@ -95,11 +76,11 @@ expected /= samplingMultiplier; // 10% error ensures a sanity test without becoming flaky. - if (statsHaveSamples((int) expected, 10) != 0) { + if (!statsHaveExpectedNumberSamples((int) expected, 10)) { throw new RuntimeException("Statistics should show about " + expected + " samples."); } - disableSampling(); + HeapMonitor.disableSampling(); } } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor sampling rate average + * @build Frame HeapMonitor + * @compile HeapMonitorStatRateTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatRateTest + */ + +public class HeapMonitorStatRateTest { + + private native static double getAverageRate(); + + public static void main(String[] args) { + int[] tab = {1024, 16384, 524288}; + + for (int rateIdx = 0; rateIdx < tab.length; rateIdx++) { + int rate = tab[rateIdx]; + + HeapMonitor.enableSampling(rate); + HeapMonitor.allocate(); + HeapMonitor.disableSampling(); + + double calculatedRate = getAverageRate(); + + double error = rate - calculatedRate; + error = error < 0 ? 
-error : error; + + double errorPercentage = error / rate * 100; + + if (errorPercentage > 5) { + throw new RuntimeException("Rate average over 5% for rate " + rate + " -> " + calculatedRate); + } + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatSimpleTest.java @@ -25,63 +25,24 @@ /** * @test + * @build Frame HeapMonitor * @summary Verifies the JVMTI Heap Monitor Statistics - * @build Frame * @compile HeapMonitorStatSimpleTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatSimpleTest */ -import java.io.PrintStream; - public class HeapMonitorStatSimpleTest { - - static { - try { - System.loadLibrary("HeapMonitor"); - } catch (UnsatisfiedLinkError ule) { - System.err.println("Could not load HeapMonitor library"); - System.err.println("java.library.path: " - + System.getProperty("java.library.path")); - throw ule; - } - } - - native static int statsNull(); - native static int enableSampling(); - - public static int cnt; - public static int g_tmp[]; - public int array[]; - - public static int helper() { - int sum = 0; - // Let us assume that the array is 24 bytes of memory. - for (int i = 0; i < 127000 / 6; i++) { - int tmp[] = new int[1]; - // Force it to be kept. 
- g_tmp = tmp; - sum += g_tmp[0]; - } - return sum; - } - - public static void wrapper() { - int sum = 0; - for (int j = 0; j < 1000; j++) { - sum += helper(); - } - System.out.println(sum); - } + private native static int areSamplingStatisticsZero(); public static void main(String[] args) { - if (statsNull() == 0) { + if (!HeapMonitor.areSamplingStatisticsZero()) { throw new RuntimeException("Statistics should be null to begin with."); } - enableSampling(); - wrapper(); + HeapMonitor.enableSampling(); + HeapMonitor.allocate(); - if (statsNull() != 0) { + if (HeapMonitor.areSamplingStatisticsZero()) { throw new RuntimeException("Statistics should not be null now."); } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java @@ -23,67 +23,25 @@ package MyPackage; +import java.util.List; + /** * @test * @summary Verifies the JVMTI Heap Monitor API - * @build Frame + * @build Frame HeapMonitor * @compile HeapMonitorTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorTest */ -import java.io.PrintStream; - public class HeapMonitorTest { - static { - try { - System.loadLibrary("HeapMonitor"); - } catch (UnsatisfiedLinkError ule) { - System.err.println("Could not load HeapMonitor library"); - System.err.println("java.library.path: " - + System.getProperty("java.library.path")); - throw ule; - } - } - - native static int checkFrames(Frame[] frames); - native static int enableSampling(); - - public static int cnt; - public static int g_tmp[]; - public int array[]; + public static void main(String[] args) { + HeapMonitor.enableSampling(); + List frameList = HeapMonitor.allocate(); + frameList.add(new Frame("main", "([Ljava/lang/String;)V", 
"HeapMonitorTest.java", 40)); - public static int helper() { - int sum = 0; - // Let us assume that the array is 24 bytes of memory. - for (int i = 0; i < 127000 / 6; i++) { - int tmp[] = new int[1]; - // Force it to be kept. - g_tmp = tmp; - sum += g_tmp[0]; - } - return sum; - } - - public static void wrapper() { - int sum = 0; - for (int j = 0; j < 1000; j++) { - sum += helper(); - } - System.out.println(sum); - } - - public static void main(String[] args) { - Frame[] frames = new Frame[3]; - frames[0] = new Frame("helper", "()I", "HeapMonitorTest.java", 60); - frames[1] = new Frame("wrapper", "()V", "HeapMonitorTest.java", 71); - frames[2] = new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorTest.java", 83); - - enableSampling(); - wrapper(); - - int status = checkFrames(frames); - if (status != 0) { + boolean status = HeapMonitor.framesExistEverywhere(frameList.toArray(new Frame[0])); + if (!status) { throw new RuntimeException("Non-zero status returned from the agent: " + status); } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorThreadOnOffTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorThreadOnOffTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorThreadOnOffTest.java @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @build Frame HeapMonitor + * @summary Verifies the JVMTI Heap Monitor Thread sanity + * @compile HeapMonitorThreadOnOffTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorThreadOnOffTest + */ + +import java.util.ArrayList; +import java.util.List; + +public class HeapMonitorThreadOnOffTest { + public static void main(String[] args) { + final int numThreads = 24; + ArrayList list = new ArrayList<>(); + + // Add one thread that consistently turns on/off the sampler to ensure correctness with + // potential resets. + Switch switchPlayer = new Switch(); + Thread switchThread = new Thread(switchPlayer, "Switch Player"); + switchThread.start(); + + for (int i = 0 ; i < numThreads; i++) { + Thread thread = new Thread(new Allocator(i), "Allocator" + i); + thread.start(); + list.add(thread); + } + + for (Thread elem : list) { + try { + elem.join(); + } catch(InterruptedException e) { + throw new RuntimeException("Thread got interrupted..."); + } + } + + switchPlayer.stop(); + try { + switchThread.join(); + } catch(InterruptedException e) { + throw new RuntimeException("Thread got interrupted while waiting for the switch player..."); + } + + // We don't check here for correctness of data. 
If we made it here, the test succeeded: + // Threads can allocate like crazy + // Other threads can turn on/off the system + } +} + +class Allocator implements Runnable { + private int depth; + private int g_tmp[]; + + public Allocator(int depth) { + this.depth = depth; + } + + private int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + private int recursiveWrapper(int depth) { + if (depth > 0) { + return recursiveWrapper(depth - 1); + } + return helper(); + } + + public void run() { + int sum = 0; + for (int j = 0; j < 500; j++) { + sum += recursiveWrapper(depth); + } + } +} + +class Switch implements Runnable { + private volatile boolean keepGoing; + + public Switch() { + keepGoing = true; + } + + public void stop() { + keepGoing = false; + } + + public void run() { + while (keepGoing) { + HeapMonitor.disableSampling(); + HeapMonitor.enableSampling(); + } + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorThreadTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorThreadTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorThreadTest.java @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @build Frame HeapMonitor + * @summary Verifies the JVMTI Heap Monitor Thread sanity + * @compile HeapMonitorThreadTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorThreadTest + */ + +import java.util.ArrayList; +import java.util.List; + +public class HeapMonitorThreadTest { + private native static boolean checkSamples(int[] threads); + + public static void main(String[] args) { + final int numThreads = 24; + ArrayList list = new ArrayList<>(); + + // Remember a lot of garbage to have space for all thread samples. 
+ HeapMonitor.enableSampling(1 << 19, 10000); + + for (int i = 0 ; i < numThreads; i++) { + Thread thread = new Thread(new Allocator(i), "Allocator" + i); + thread.start(); + list.add(thread); + } + + for (Thread elem : list) { + try { + elem.join(); + } catch(InterruptedException e) { + throw new RuntimeException("Thread got interrupted..."); + } + } + + int[] threads = new int[numThreads]; + if (!checkSamples(threads)) { + throw new RuntimeException("Problem with checkSamples..."); + } + + for (int elem : threads) { + if (elem == 0) { + throw new RuntimeException("Missing at least one thread in the array..."); + } + } + } +} + +class Allocator implements Runnable { + private int depth; + private int g_tmp[]; + + public Allocator(int depth) { + this.depth = depth; + } + + private int helper() { + int sum = 0; + // Let us assume that the array is 24 bytes of memory. + for (int i = 0; i < 127000 / 6; i++) { + int tmp[] = new int[1]; + // Force it to be kept. + g_tmp = tmp; + sum += g_tmp[0]; + } + return sum; + } + + private int recursiveWrapper(int depth) { + if (depth > 0) { + return recursiveWrapper(depth - 1); + } + return helper(); + } + + public void run() { + int sum = 0; + for (int j = 0; j < 50; j++) { + sum += recursiveWrapper(depth); + } + System.out.println(sum); + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -22,6 +22,7 @@ */ #include +#include #include #include "jvmti.h" @@ -41,15 +42,15 @@ #endif -#define PASSED 0 -#define FAILED 2 - +#define TRUE 1 +#define FALSE 0 +#define PRINT_OUT 0 #define MAX_TRACES 400 static const char *EXC_CNAME = "java/lang/Exception"; static jvmtiEnv *jvmti = NULL; -static int check_error(jvmtiError err, const char* s) { +static int check_error(jvmtiError err, 
const char *s) { if (err != JVMTI_ERROR_NONE) { printf(" ## %s error: %d\n", s, err); return 1; @@ -57,7 +58,7 @@ return 0; } -static int check_capability_error(jvmtiError err, const char* s) { +static int check_capability_error(jvmtiError err, const char *s) { if (err != JVMTI_ERROR_NONE) { if (err == JVMTI_ERROR_MUST_POSSESS_CAPABILITY) { return 0; @@ -96,29 +97,6 @@ return JNI_VERSION_1_8; } -JNIEXPORT void JNICALL OnVMInit(jvmtiEnv *jvmti, JNIEnv *jni_env, jthread thread) { -} - -JNIEXPORT void JNICALL OnClassLoad(jvmtiEnv *jvmti_env, JNIEnv *jni_env, - jthread thread, jclass klass) { - // NOP. -} - -JNIEXPORT void JNICALL OnClassPrepare(jvmtiEnv *jvmti_env, JNIEnv *jni_env, - jthread thread, jclass klass) { - // We need to do this to "prime the pump", as it were -- make sure - // that all of the methodIDs have been initialized internally, for - // AsyncGetCallTrace. - jint method_count; - jmethodID *methods = 0; - jvmtiError err = (*jvmti)->GetClassMethods(jvmti, klass, &method_count, &methods); - if ((err != JVMTI_ERROR_NONE) && (err != JVMTI_ERROR_CLASS_NOT_PREPARED)) { - // JVMTI_ERROR_CLASS_NOT_PREPARED is okay because some classes may - // be loaded but not prepared at this point. - throw_exc(jni_env, "Failed to create method IDs for methods in class\n"); - } -} - static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) { jint res; @@ -133,54 +111,28 @@ jvmtiEventCallbacks callbacks; memset(&callbacks, 0, sizeof(callbacks)); - callbacks.VMInit = &OnVMInit; - callbacks.ClassLoad = &OnClassLoad; - callbacks.ClassPrepare = &OnClassPrepare; - jvmtiCapabilities caps; memset(&caps, 0, sizeof(caps)); // Get line numbers, sample heap, and filename for the test. 
caps.can_get_line_numbers = 1; - caps.can_sample_heap= 1; + caps.can_sample_heap = 1; caps.can_get_source_file_name = 1; if (check_error((*jvmti)->AddCapabilities(jvmti, &caps), "Add capabilities\n")){ return JNI_ERR; } - if (check_error((*jvmti)->SetEventCallbacks(jvmti, &callbacks, sizeof(jvmtiEventCallbacks)), " Set Event Callbacks")) { return JNI_ERR; } - if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, - JVMTI_EVENT_VM_INIT, NULL), - "Set Event for VM Init")) { - return JNI_ERR; - } - if (check_error((*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, - JVMTI_EVENT_CLASS_LOAD, NULL), - "Set Event for Class Load")) { - return JNI_ERR; - } - if (check_error( (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE, - JVMTI_EVENT_CLASS_PREPARE, NULL), - "Set Event for Class Prepare")) { - return JNI_ERR; - } - return JNI_OK; } // Given a method and a location, this method gets the line number. -// Kind of expensive, comparatively. static -jint get_line_number(jvmtiEnv *jvmti, jmethodID method, jlocation location) { - // The location is -1 if the bci isn't known or -3 for a native method. - if (location == -1 || location == -3) { - return -1; - } - +jint get_line_number(jvmtiEnv *jvmti, jmethodID method, + jlocation location) { // Read the line number table. 
jvmtiLineNumberEntry *table_ptr = 0; jint line_number_table_entries; @@ -225,14 +177,15 @@ int line_number; } ExpectedContentFrame; -static jint check_sample_content(JNIEnv *env, - jvmtiStackTrace *trace, - ExpectedContentFrame *expected, - int expected_count) { +static jboolean check_sample_content(JNIEnv *env, + jvmtiStackTrace *trace, + ExpectedContentFrame *expected, + int expected_count, + int print_out_comparisons) { int i; if (expected_count > trace->frame_count) { - return 0; + return FALSE; } for (i = 0; i < expected_count; i++) { @@ -242,7 +195,7 @@ char *name = NULL, *signature = NULL, *file_name = NULL; if (bci < 0) { - return 0; + return FALSE; } // Transform into usable information. @@ -252,84 +205,113 @@ jclass declaring_class; if (JVMTI_ERROR_NONE != (*jvmti)->GetMethodDeclaringClass(jvmti, methodid, &declaring_class)) { - return 0; + return FALSE; } jvmtiError err = (*jvmti)->GetSourceFileName(jvmti, declaring_class, &file_name); if (err != JVMTI_ERROR_NONE) { - return 0; + return FALSE; } // Compare now, none should be NULL. 
if (name == NULL) { - return 0; + return FALSE; } if (file_name == NULL) { - return 0; + return FALSE; } if (signature == NULL) { - return 0; + return FALSE; + } + + if (print_out_comparisons) { + fprintf(stderr, "Comparing:\n"); + fprintf(stderr, "\tNames: %s and %s\n", name, expected[i].name); + fprintf(stderr, "\tSignatures: %s and %s\n", signature, expected[i].signature); + fprintf(stderr, "\tFile name: %s and %s\n", file_name, expected[i].file_name); + fprintf(stderr, "\tLines: %d and %d\n", line_number, expected[i].line_number); + fprintf(stderr, "\tResult is %d\n", + (strcmp(name, expected[i].name) || + strcmp(signature, expected[i].signature) || + strcmp(file_name, expected[i].file_name) || + line_number != expected[i].line_number)); } if (strcmp(name, expected[i].name) || strcmp(signature, expected[i].signature) || strcmp(file_name, expected[i].file_name) || line_number != expected[i].line_number) { - return 0; + return FALSE; } } - return 1; + return TRUE; } -static jint compare_samples(JNIEnv* env, jvmtiStackTrace* traces, int trace_count, - ExpectedContentFrame* expected_content, size_t size) { +static jboolean compare_samples(JNIEnv* env, jvmtiStackTrace* traces, + int trace_count, + ExpectedContentFrame* expected_content, + size_t size, + int print_out_comparisons) { // We expect the code to record correctly the bci, retrieve the line // number, have the right method and the class name of the first frames. int i; for (i = 0; i < trace_count; i++) { jvmtiStackTrace *trace = traces + i; - if (check_sample_content(env, trace, expected_content, size)) { + if (check_sample_content(env, trace, expected_content, size, + print_out_comparisons)) { // At least one frame matched what we were looking for. 
- return 1; + return TRUE; } } - return 0; + return FALSE; } -static jint check_samples(JNIEnv* env, ExpectedContentFrame* expected, - size_t size, - jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*)) { +static jboolean +check_samples(JNIEnv* env, + ExpectedContentFrame* expected, + size_t size, + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*), + int print_out_comparisons) { jvmtiStackTraces traces; jvmtiError error = get_traces(jvmti, &traces); if (error != JVMTI_ERROR_NONE) { - return 0; + return FALSE; } int result = compare_samples(env, traces.stack_traces, traces.trace_count, - expected, size); + expected, size, print_out_comparisons); (*jvmti)->ReleaseTraces(jvmti, &traces); return result; } -static jint frames_exist_live(JNIEnv* env, ExpectedContentFrame* expected, - size_t size) { - return check_samples(env, expected, size, (*jvmti)->GetLiveTraces); +static jboolean frames_exist_live(JNIEnv* env, + ExpectedContentFrame* expected, + size_t size, + int print_out_comparisons) { + return check_samples(env, expected, size, (*jvmti)->GetLiveTraces, + print_out_comparisons); } -static jint frames_exist_recent(JNIEnv* env, ExpectedContentFrame* expected, - size_t size) { - return check_samples(env, expected, size, (*jvmti)->GetGarbageTraces); +static jboolean frames_exist_recent(JNIEnv* env, + ExpectedContentFrame* expected, + size_t size, + int print_out_comparisons) { + return check_samples(env, expected, size, (*jvmti)->GetGarbageTraces, + print_out_comparisons); } -static jint frames_exist_frequent(JNIEnv* env, ExpectedContentFrame* expected, - size_t size) { - return check_samples(env, expected, size, (*jvmti)->GetFrequentGarbageTraces); +static jboolean frames_exist_frequent(JNIEnv* env, + ExpectedContentFrame* expected, + size_t size, + int print_out_comparisons) { + return check_samples(env, expected, size, (*jvmti)->GetFrequentGarbageTraces, + print_out_comparisons); } // Static native API for various tests. 
@@ -339,18 +321,23 @@ for(i = 0; i < size; i++) { jobject obj = (*env)->GetObjectArrayElement(env, frames, i); jclass frame_class = (*env)->GetObjectClass(env, obj); - jfieldID line_number_field_id = (*env)->GetFieldID(env, frame_class, "lineNumber", "I"); + jfieldID line_number_field_id = (*env)->GetFieldID(env, frame_class, + "lineNumber", "I"); int line_number = (*env)->GetIntField(env, obj, line_number_field_id); - jfieldID string_id = (*env)->GetFieldID(env, frame_class, "method", "Ljava/lang/String;"); - jstring string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); + jfieldID string_id = (*env)->GetFieldID(env, frame_class, "method", + "Ljava/lang/String;"); + jstring string_object = (jstring) (*env)->GetObjectField(env, obj, + string_id); const char* method = (*env)->GetStringUTFChars(env, string_object, 0); - string_id = (*env)->GetFieldID(env, frame_class, "fileName", "Ljava/lang/String;"); + string_id = (*env)->GetFieldID(env, frame_class, "fileName", + "Ljava/lang/String;"); string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); const char* file_name = (*env)->GetStringUTFChars(env, string_object, 0); - string_id = (*env)->GetFieldID(env, frame_class, "signature", "Ljava/lang/String;"); + string_id = (*env)->GetFieldID(env, frame_class, "signature", + "Ljava/lang/String;"); string_object = (jstring) (*env)->GetObjectField(env, obj, string_id); const char* signature= (*env)->GetStringUTFChars(env, string_object, 0); @@ -361,89 +348,12 @@ } } -static jint checkAnd(JNIEnv *env, jobjectArray frames, int live, int recent, - int frequent) { +static jboolean check_and(JNIEnv *env, jobjectArray frames, int live, + int recent, int frequent, int print_out) { jobject loader = NULL; if (frames == NULL) { - return 0; - } - - // Start by transforming the frames into a C-friendly structure. 
- jsize size = (*env)->GetArrayLength(env, frames); - ExpectedContentFrame native_frames[size]; - fill_native_frames(env, frames, native_frames, size); - - if (jvmti == NULL) { - throw_exc(env, "JVMTI client was not properly loaded!\n"); - return 0; - } - - int result = 1; - - if (live) { - result = frames_exist_live(env, native_frames, size); - } - - if (recent) { - result = result && - frames_exist_recent(env, native_frames, size); - } - - if (frequent) { - result = result && - frames_exist_frequent(env, native_frames, size); - } - - return result; -} - -static jint checkOr(JNIEnv *env, jobjectArray frames, int live, int recent, - int frequent) { - jobject loader = NULL; - - if (frames == NULL) { - return 0; - } - - // Start by transforming the frames into a C-friendly structure. - jsize size = (*env)->GetArrayLength(env, frames); - ExpectedContentFrame native_frames[size]; - fill_native_frames(env, frames, native_frames, size); - - if (jvmti == NULL) { - throw_exc(env, "JVMTI client was not properly loaded!\n"); - return 0; - } - - int result = 0; - - if (live) { - result = frames_exist_live(env, native_frames, size); - } - - if (recent) { - result = result || - frames_exist_recent(env, native_frames, size); - } - - if (frequent) { - result = result || - frames_exist_frequent(env, native_frames, size); - } - - return result; -} - -static jint checkAll(JNIEnv *env, jobjectArray frames) { - return checkAnd(env, frames, 1, 1, 1); -} - -static jint checkNone(JNIEnv *env, jobjectArray frames) { - jobject loader = NULL; - - if (frames == NULL) { - return 0; + return FALSE; } // Start by transforming the frames into a C-friendly structure. 
@@ -453,185 +363,109 @@ if (jvmti == NULL) { throw_exc(env, "JVMTI client was not properly loaded!\n"); - return 0; + return FALSE; } - if ((!frames_exist_live(env, native_frames, size)) && - (!frames_exist_recent(env, native_frames, size)) && - (!frames_exist_frequent(env, native_frames, size))) { - return 1; - } - return 0; -} + int result = TRUE; -static void enable_sampling() { - check_error((*jvmti)->StartHeapSampling(jvmti, 1 << 19, MAX_TRACES), - "Start Heap Sampling"); -} + if (live) { + result = frames_exist_live(env, native_frames, size, print_out); + } -static void enable_sampling_with_rate(int rate) { - check_error((*jvmti)->StartHeapSampling(jvmti, rate, MAX_TRACES), - "Start Heap Sampling"); -} + if (recent) { + result = result && + frames_exist_recent(env, native_frames, size, print_out); + } -static void disable_sampling() { - check_error((*jvmti)->StopHeapSampling(jvmti), "Stop Heap Sampling"); + if (frequent) { + result = result && + frames_exist_frequent(env, native_frames, size, print_out); + } + + return result; } -// HeapMonitorTest JNI. -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - // We want the frames in each part. - if (!checkAll(env, frames)) { - return FAILED; +static jboolean check_or(JNIEnv *env, jobjectArray frames, int live, int recent, + int frequent, int print_out) { + jobject loader = NULL; + + if (frames == NULL) { + return FALSE; } - return PASSED; -} -JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorTest_enableSampling(JNIEnv *env, jclass cls) { - enable_sampling(); + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return FALSE; + } + + jboolean result = FALSE; + + if (live) { + result = frames_exist_live(env, native_frames, size, print_out); + } + + if (recent) { + result = result || + frames_exist_recent(env, native_frames, size, print_out); + } + + if (frequent) { + result = result || + frames_exist_frequent(env, native_frames, size, print_out); + } + + return result; } -// HeapMonitorOnOffTest JNI. -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorOnOffTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - // We want the frames in each part. - if (!checkAll(env, frames)) { - return FAILED; - } - return PASSED; +static jboolean checkAll(JNIEnv *env, jobjectArray frames, int print_out) { + return check_and(env, frames, 1, 1, 1, print_out); } -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorOnOffTest_checkWipeOut(JNIEnv *env, jclass cls, jobjectArray frames) { - // We want the frames in none of the parts. - if (!checkNone(env, frames)) { - return FAILED; +static jboolean checkNone(JNIEnv *env, jobjectArray frames, + int print_out) { + jobject loader = NULL; + + if (frames == NULL) { + return FALSE; } - return PASSED; -} -JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorOnOffTest_enableSampling(JNIEnv *env, jclass cls) { - enable_sampling(); + // Start by transforming the frames into a C-friendly structure. 
+ jsize size = (*env)->GetArrayLength(env, frames); + ExpectedContentFrame native_frames[size]; + fill_native_frames(env, frames, native_frames, size); + + if (jvmti == NULL) { + throw_exc(env, "JVMTI client was not properly loaded!\n"); + return FALSE ; + } + + if ((!frames_exist_live(env, native_frames, size, print_out)) && + (!frames_exist_recent(env, native_frames, size, print_out)) && + (!frames_exist_frequent(env, native_frames, size, print_out))) { + return TRUE; + } + return FALSE; } JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorOnOffTest_disableSampling(JNIEnv *env, jclass cls) { - disable_sampling(); -} - -// HeapMonitorRecentTest JNI. -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorRecentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - // We want the frames in each part. - if (!checkAll(env, frames)) { - return FAILED; - } - return PASSED; -} - -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorRecentTest_checkLiveOrRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - if (checkOr(env, frames, 1, 1, 0)) { - return FAILED; - } - return PASSED; -} - -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorRecentTest_checkLiveAndRecentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - if (checkAnd(env, frames, 1, 1, 0)) { - return FAILED; - } - return PASSED; -} - -JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorRecentTest_enableSampling(JNIEnv *env, jclass cls) { - enable_sampling(); -} - -// HeapMonitorFrequentTest JNI. -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorFrequentTest_checkFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - // We want the frames in each part. 
- if (!checkAll(env, frames)) { - return FAILED; - } - return PASSED; -} - -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorFrequentTest_checkFrequentFrames(JNIEnv *env, jclass cls, jobjectArray frames) { - if (checkAnd(env, frames, 0, 0, 1)) { - return PASSED; - } - return FAILED; +Java_MyPackage_HeapMonitor_enableSampling(JNIEnv *env, jclass cls, int rate, + int max_traces) { + check_error((*jvmti)->StartHeapSampling(jvmti, rate, max_traces), + "Start Heap Sampling"); } JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorFrequentTest_enableSampling(JNIEnv *env, jclass cls) { - enable_sampling(); +Java_MyPackage_HeapMonitor_disableSampling(JNIEnv *env, jclass cls) { + check_error((*jvmti)->StopHeapSampling(jvmti), "Stop Heap Sampling"); } JNIEXPORT jboolean JNICALL -Java_MyPackage_HeapMonitorNoCapabilityTest_allSamplingMethodsFail(JNIEnv *env, jclass cls) { - jvmtiCapabilities caps; - memset(&caps, 0, sizeof(caps)); - caps.can_sample_heap= 1; - if (check_error((*jvmti)->RelinquishCapabilities(jvmti, &caps), - "Add capabilities\n")){ - return 0; - } - - if (check_capability_error((*jvmti)->StartHeapSampling(jvmti, 1<<19, - MAX_TRACES), - "Start Heap Sampling")) { - return 0; - } - - if (check_capability_error((*jvmti)->StopHeapSampling(jvmti), - "Stop Heap Sampling")) { - return 0; - } - - if (check_capability_error((*jvmti)->ReleaseTraces(jvmti, NULL), - "Release Traces")) { - return 0; - } - - if (check_capability_error((*jvmti)->GetHeapSamplingStats(jvmti, NULL), - "Get Heap Sampling Stats")) { - return 0; - } - - if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL), - "Get Garbage Traces")) { - return 0; - } - - if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL), - "Get Frequent Garbage Traces")) { - return 0; - } - - if (check_capability_error((*jvmti)->GetLiveTraces(jvmti, NULL), - "Get Live Traces")) { - return 0; - } - - // Calling enable sampling should fail now. 
- return 1; -} - -JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorStatSimpleTest_enableSampling(JNIEnv *env, jclass cls) { - enable_sampling(); -} - -static jint stats_are_zero() { +Java_MyPackage_HeapMonitor_areSamplingStatisticsZero(JNIEnv *env, jclass cls) { jvmtiHeapSamplingStats stats; check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), "Heap Sampling Statistics"); @@ -641,51 +475,219 @@ return memcmp(&stats, &zero, sizeof(zero)) == 0; } -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorStatSimpleTest_statsNull(JNIEnv *env, jclass cls) { - return stats_are_zero(); -} - -JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorStatCorrectnessTest_disableSampling(JNIEnv *env, jclass cls) { - disable_sampling(); +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitor_framesExistEverywhere(JNIEnv *env, jclass cls, + jobjectArray frames) { + // We want the frames in each part. + return checkAll(env, frames, PRINT_OUT); } -JNIEXPORT void JNICALL -Java_MyPackage_HeapMonitorStatCorrectnessTest_enableSampling(JNIEnv *env, jclass cls, jint rate) { - enable_sampling_with_rate(rate); +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitor_framesExistNowhere(JNIEnv *env, jclass cls, + jobjectArray frames) { + // We want the frames in none of the parts. 
+ return checkNone(env, frames, PRINT_OUT); } -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorStatCorrectnessTest_statsNull(JNIEnv *env, jclass cls) { - return stats_are_zero(); +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorRecentTest_framesNotInLiveOrRecent(JNIEnv *env, + jclass cls, + jobjectArray frames) { + return !check_or(env, frames, TRUE, TRUE, FALSE, PRINT_OUT); } -JNIEXPORT jint JNICALL -Java_MyPackage_HeapMonitorStatCorrectnessTest_statsHaveSamples(JNIEnv *env, - jclass cls, - int expected, - int percent_error) { +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorRecentTest_framesExistInLiveAndRecent(JNIEnv *env, + jclass cls, + jobjectArray frames) { + return check_and(env, frames, TRUE, TRUE, FALSE, PRINT_OUT); +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorFrequentTest_framesExistInFrequent(JNIEnv *env, + jclass cls, + jobjectArray frames) { + return check_and(env, frames, FALSE, FALSE, TRUE, PRINT_OUT); +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorNoCapabilityTest_allSamplingMethodsFail(JNIEnv *env, + jclass cls) { + jvmtiCapabilities caps; + memset(&caps, 0, sizeof(caps)); + caps.can_sample_heap= 1; + if (check_error((*jvmti)->RelinquishCapabilities(jvmti, &caps), + "Add capabilities\n")){ + return FALSE; + } + + if (check_capability_error((*jvmti)->StartHeapSampling(jvmti, 1<<19, + MAX_TRACES), + "Start Heap Sampling")) { + return FALSE; + } + + if (check_capability_error((*jvmti)->StopHeapSampling(jvmti), + "Stop Heap Sampling")) { + return FALSE; + } + + if (check_capability_error((*jvmti)->ReleaseTraces(jvmti, NULL), + "Release Traces")) { + return FALSE; + } + + if (check_capability_error((*jvmti)->GetHeapSamplingStats(jvmti, NULL), + "Get Heap Sampling Stats")) { + return FALSE; + } + + if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL), + "Get Garbage Traces")) { + return FALSE; + } + + if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL), + "Get Frequent 
Garbage Traces")) { + return FALSE; + } + + if (check_capability_error((*jvmti)->GetLiveTraces(jvmti, NULL), + "Get Live Traces")) { + return FALSE; + } + return TRUE; +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorStatCorrectnessTest_statsHaveExpectedNumberSamples(JNIEnv *env, + jclass cls, + int expected, + int percent_error) { jvmtiHeapSamplingStats stats; check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), "Heap Sampling Statistics"); - fprintf(stderr, "Statistics show:\n"); - fprintf(stderr, "\tCollected samples: %ld\n\tGarbage collected samples: %ld\n", - stats.sample_count, stats.garbage_collected_samples); - fprintf(stderr, "\tSample rate accumulated: %ld\n\tSample Rate Count: %ld\n", - stats.sample_rate_accumulation, stats.sample_rate_count); - fprintf(stderr, "\tStack depth accumulation: %ld\n", - stats.stack_depth_accumulation); - - fprintf(stderr, "Expected is %d\n", expected); double diff_ratio = (stats.sample_count - expected); diff_ratio = (diff_ratio < 0) ? 
-diff_ratio : diff_ratio; diff_ratio /= expected; - fprintf(stderr, "Diff ratio is %f\n", diff_ratio); + return diff_ratio * 100 < percent_error; +} - return diff_ratio * 100 > percent_error; +JNIEXPORT jdouble JNICALL +Java_MyPackage_HeapMonitorStatRateTest_getAverageRate(JNIEnv *env, jclass cls) { + jvmtiHeapSamplingStats stats; + check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), + "Heap Sampling Statistics"); + return ((double) stats.sample_rate_accumulation) / stats.sample_rate_count; +} + +JNIEXPORT jdouble JNICALL +Java_MyPackage_HeapMonitorStackDepthTest_getAverageStackDepth(JNIEnv *env, + jclass cls) { + jvmtiStackTraces traces; + jvmtiError error = (*jvmti)->GetLiveTraces(jvmti, &traces);; + + if (error != JVMTI_ERROR_NONE) { + return 0; + } + + int trace_count = traces.trace_count; + + if (trace_count == 0) { + return 0; + } + + int i; + jvmtiStackTrace* stack_traces = traces.stack_traces; + double sum = 0; + for (i = 0; i < trace_count; i++) { + jvmtiStackTrace *stack_trace = stack_traces + i; + sum += stack_trace->frame_count; + } + + return sum / i; +} + +typedef struct sThreadsFound { + jint *threads; + int num_threads; +} ThreadsFound; + +static void find_threads_in_traces(jvmtiStackTraces* traces, + ThreadsFound* thread_data) { + int i; + jvmtiStackTrace* stack_traces = traces->stack_traces; + int trace_count = traces->trace_count; + + jint *threads = thread_data->threads; + int num_threads = thread_data->num_threads; + + // We are looking for at last expected_num_threads different traces. + for (i = 0; i < trace_count; i++) { + jvmtiStackTrace *stack_trace = stack_traces + i; + jlong thread_id = stack_trace->thread_id; + + // Check it is the right frame: only accept helper top framed traces. 
+ jmethodID methodid = stack_trace->frames[0].method; + char *name = NULL, *signature = NULL, *file_name = NULL; + (*jvmti)->GetMethodName(jvmti, methodid, &name, &signature, 0); + + if (strcmp(name, "helper")) { + continue; + } + + // Really not efficient look-up but it's for a test... + int found = 0; + int j; + for (j = 0; j < num_threads; j++) { + if (thread_id == threads[j]) { + found = 1; + break; + } + } + + if (!found) { + threads[num_threads] = thread_id; + num_threads++; + } + } + thread_data->num_threads = num_threads; +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorThreadTest_checkSamples(JNIEnv* env, jclass cls, + jintArray threads) { + jvmtiStackTraces traces; + ThreadsFound thread_data; + thread_data.threads = (*env)->GetIntArrayElements(env, threads, 0); + thread_data.num_threads = 0; + + // Get live and garbage traces to ensure we capture all the threads that have + // been sampled. + if ((*jvmti)->GetLiveTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + return FALSE; + } + + find_threads_in_traces(&traces, &thread_data); + + if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + return FALSE; + } + + if ((*jvmti)->GetGarbageTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + return FALSE; + } + + find_threads_in_traces(&traces, &thread_data); + + if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + return FALSE; + } + + (*env)->ReleaseIntArrayElements(env, threads, thread_data.threads, 0); + return TRUE; } #ifdef __cplusplus # HG changeset patch # User Jean Christophe Beyler # Date 1509482649 25200 # Tue Oct 31 13:44:09 2017 -0700 # Node ID 15ff781c423647274c1894543683bd198d520d76 # Parent 2ee404f35143ccee32b1d6fb41c34c227a86f4e0 [mq]: heap14_rebased diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -292,13 +292,13 @@ void 
CollectedHeap::sample_allocation(Thread* thread, HeapWord* obj, - size_t size, size_t fix_sample_rate) { + size_t size, size_t overflowed_words) { // Object is allocated, sample it now. HeapMonitoring::object_alloc_do_sample(thread, reinterpret_cast(obj), size * HeapWordSize); // Pick a next sample in this case, we allocated right. - thread->tlab().pick_next_sample(fix_sample_rate); + thread->tlab().pick_next_sample(overflowed_words); } HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) { @@ -310,7 +310,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) { // In case the tlab changes, remember if this one wanted a sample. - bool should_sample = thread->tlab().should_sample() && HeapMonitoring::enabled(); + bool should_sample = HeapMonitoring::enabled() && thread->tlab().should_sample(); HeapWord* obj = NULL; if (should_sample) { @@ -321,10 +321,10 @@ // If we did allocate in this tlab, sample it. Otherwise, we wait for the // new tlab's first allocation at the end of this method. if (obj != NULL) { - // Fix sample rate by removing the extra bytes allocated in this last + // Fix sample rate by removing the extra words allocated in this last // sample. - size_t fix_sample_rate = thread->tlab().top() - tlab_old_end; - sample_allocation(thread, obj, size, fix_sample_rate); + size_t overflowed_words = pointer_delta(thread->tlab().top(), tlab_old_end); + sample_allocation(thread, obj, size, overflowed_words); return obj; } } diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp @@ -150,8 +150,10 @@ static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size); // Sample the allocation via HeapMonitoring. + // overflowed_words represents the number of HeapWords that went past the + // sampling boundary. 
This is used to fix the next sampling rate. static void sample_allocation(Thread* thread, HeapWord* obj, size_t size, - size_t fix_sample_rate = 0); + size_t overflowed_words = 0); // Try to allocate the object we want to sample in this tlab, returns NULL if // fails to allocate. static HeapWord* allocate_sampled_object(Thread* thread, size_t size); diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -129,7 +129,7 @@ } assert(!(retire || ZeroTLAB) || (start() == NULL && end() == NULL && top() == NULL && - actual_end() == NULL && slow_path_end() == NULL), + _actual_end == NULL && _slow_path_end == NULL), "TLAB must be reset"); } @@ -180,7 +180,7 @@ // actual refill. size_t old_bytes_until_sample = 0; if (_number_of_refills > 1) { - old_bytes_until_sample = bytes_until_sample(); + old_bytes_until_sample = _bytes_until_sample; } initialize(start, top, start + new_size - alignment_reserve()); @@ -327,8 +327,8 @@ } void ThreadLocalAllocBuffer::set_sample_end() { - size_t heap_words_remaining = _end - _top; - size_t bytes_left = bytes_until_sample(); + size_t heap_words_remaining = pointer_delta(_end, _top); + size_t bytes_left = _bytes_until_sample; size_t words_until_sample = bytes_left / HeapWordSize; if (heap_words_remaining > words_until_sample) { @@ -342,29 +342,30 @@ } } -void ThreadLocalAllocBuffer::pick_next_sample(size_t diff) { +void ThreadLocalAllocBuffer::pick_next_sample(size_t overflowed_words) { if (!HeapMonitoring::enabled()) { return; } - if (bytes_until_sample() == 0) { - HeapMonitoring::pick_next_sample(bytes_until_sample_addr()); + if (_bytes_until_sample == 0) { + HeapMonitoring::pick_next_sample(&_bytes_until_sample); } - if (diff > 0) { + if (overflowed_words > 0) { // Try to correct sample size by removing extra space from last allocation. 
- if (bytes_until_sample() > diff * HeapWordSize) { - set_bytes_until_sample(bytes_until_sample() - diff * HeapWordSize); + if (_bytes_until_sample > overflowed_words * HeapWordSize) { + set_bytes_until_sample(_bytes_until_sample - overflowed_words * HeapWordSize); } } set_sample_end(); log_trace(gc, tlab)("TLAB picked next sample: thread: " INTPTR_FORMAT " [id: %2d]" - " start: %p top: %p end: %p actual_end: %p slow_path_end: %p", + " start: " INTPTR_FORMAT " top: " INTPTR_FORMAT " end: " INTPTR_FORMAT " actual_end:" + INTPTR_FORMAT " slow_path_end: " INTPTR_FORMAT, p2i(myThread()), myThread()->osthread()->thread_id(), - start(), top(), end(), - actual_end(), slow_path_end()); + p2i(start()), p2i(top()), p2i(end()), + p2i(_actual_end), p2i(_slow_path_end)); } Thread* ThreadLocalAllocBuffer::myThread() { @@ -391,8 +392,8 @@ } size_t size_in_bytes = size * HeapWordSize; - if (bytes_until_sample() > size_in_bytes) { - set_bytes_until_sample(bytes_until_sample() - size_in_bytes); + if (_bytes_until_sample > size_in_bytes) { + set_bytes_until_sample(_bytes_until_sample - size_in_bytes); } else { // Technically this is not exactly right, we probably should remember how many bytes are // negative probably to then reduce our next sample size. diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp @@ -93,6 +93,10 @@ size_t remaining() { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } + // Obtain the actual end of the TLAB. + HeapWord* hard_end(); + void set_sample_end(); + // Make parsable and release it. 
void reset(); @@ -130,9 +134,6 @@ HeapWord* start() const { return _start; } HeapWord* end() const { return _end; } - HeapWord* slow_path_end() const { return _slow_path_end; } - HeapWord* actual_end() const { return _actual_end; } - HeapWord* hard_end(); HeapWord* top() const { return _top; } HeapWord* pf_top() const { return _pf_top; } size_t desired_size() const { return _desired_size; } @@ -180,19 +181,15 @@ void initialize(); void pick_next_sample(size_t diff = 0); - void set_sample_end(); void set_back_actual_end(); void handle_sample(Thread* thread, HeapWord* result, size_t size); - size_t bytes_until_sample() { return _bytes_until_sample; } - size_t *bytes_until_sample_addr() { return &_bytes_until_sample; } - bool should_sample() { return bytes_until_sample() == 0; } + bool should_sample() { return _bytes_until_sample == 0; } static size_t refill_waste_limit_increment() { return TLABWasteIncrement; } // Code generation support static ByteSize start_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _start); } static ByteSize end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _end ); } - static ByteSize actual_end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _actual_end ); } static ByteSize top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _top ); } static ByteSize pf_top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top ); } static ByteSize size_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); } diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -209,7 +209,7 @@ } void initialize(int max_storage) { - MutexLocker mu(HeapMonitor_lock); + MutexLocker mu(HeapMonitorStorage_lock); free_storage(); allocate_storage(max_storage); memset(&_stats, 0, sizeof(_stats)); @@ -295,7 +295,6 @@ // Statics for Sampler double 
HeapMonitoring::_log_table[1 << FastLogNumBits]; bool HeapMonitoring::_enabled; -AlwaysTrueClosure HeapMonitoring::_always_true; jint HeapMonitoring::_monitoring_rate; // Cheap random number generator @@ -382,7 +381,7 @@ } void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) { - MutexLocker mu(HeapMonitor_lock); + MutexLocker mu(HeapMonitorStorage_lock); StackTraceData new_data(trace, o); _stats.sample_count++; _stats.stack_depth_accumulation += trace->frame_count; @@ -391,7 +390,7 @@ void StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, OopClosure *f) { - MutexLocker mu(HeapMonitor_lock); + MutexLocker mu(HeapMonitorStorage_lock); size_t count = 0; if (initialized()) { int len = _allocated_traces->length(); @@ -476,7 +475,7 @@ void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, jvmtiStackTraces *traces) { - MutexLocker mu(HeapMonitor_lock); + MutexLocker mu(HeapMonitorStorage_lock); int len = copier.size(); // Create a new array to store the StackTraceData objects. @@ -566,6 +565,7 @@ void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_gc_storage) { + MutexLocker mu(HeapMonitor_lock); // Ignore if already enabled. if (_enabled) { return; @@ -592,6 +592,7 @@ } void HeapMonitoring::stop_profiling() { + MutexLocker mu(HeapMonitor_lock); _enabled = false; } @@ -613,10 +614,10 @@ // 5194297183973780480 bytes. In this case, // for sample_parameter = 1<<19, max possible step is // 9448372 bytes (24 bits). - const uint64_t prng_mod_power = 48; // Number of bits in prng + const uint64_t PrngModPower = 48; // Number of bits in prng // The uint32_t cast is to prevent a (hard-to-reproduce) NAN // under piii debug for some binaries. - double q = static_cast(_rnd >> (prng_mod_power - 26)) + 1.0; + double q = static_cast(_rnd >> (PrngModPower - 26)) + 1.0; // Put the computed p-value through the CDF of a geometric. 
// For faster performance (save ~1/20th exec time), replace // min(0.0, FastLog2(q) - 26) by (Fastlog2(q) - 26.000705) diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -40,7 +40,6 @@ static const int FastLogNumBits = 10; static const int FastLogMask = (1 << FastLogNumBits) - 1; static double _log_table[1<(0)) << prng_mod_power); - return (PrngMult * rnd + prng_add) & prng_mod_mask; + const uint64_t PrngAdd = 0xB; + const uint64_t PrngModPower = 48; + const uint64_t PrngModMask = right_n_bits(PrngModPower); + return (PrngMult * rnd + PrngAdd) & PrngModMask; } static inline double fast_log2(const double & d) { @@ -96,6 +94,7 @@ // but which no longer have other references in the heap. static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); static void weak_oops_do(OopClosure* oop_closure) { + AlwaysTrueClosure _always_true; weak_oops_do(&_always_true, oop_closure); } diff --git a/src/hotspot/share/runtime/mutexLocker.cpp b/src/hotspot/share/runtime/mutexLocker.cpp --- a/src/hotspot/share/runtime/mutexLocker.cpp +++ b/src/hotspot/share/runtime/mutexLocker.cpp @@ -128,6 +128,7 @@ Monitor* RedefineClasses_lock = NULL; Monitor* HeapMonitor_lock = NULL; +Monitor* HeapMonitorStorage_lock = NULL; #if INCLUDE_TRACE Mutex* JfrStacktrace_lock = NULL; @@ -286,7 +287,8 @@ def(PeriodicTask_lock , PaddedMonitor, nonleaf+5, true, Monitor::_safepoint_check_sometimes); def(RedefineClasses_lock , PaddedMonitor, nonleaf+5, true, Monitor::_safepoint_check_always); - def(HeapMonitor_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_always); + def(HeapMonitorStorage_lock , PaddedMonitor, nonleaf, true, Monitor::_safepoint_check_always); + def(HeapMonitor_lock , PaddedMonitor, nonleaf+1, true, Monitor::_safepoint_check_always); if (WhiteBoxAPI) { def(Compilation_lock , PaddedMonitor, leaf, false, 
Monitor::_safepoint_check_never); diff --git a/src/hotspot/share/runtime/mutexLocker.hpp b/src/hotspot/share/runtime/mutexLocker.hpp --- a/src/hotspot/share/runtime/mutexLocker.hpp +++ b/src/hotspot/share/runtime/mutexLocker.hpp @@ -126,7 +126,8 @@ extern Monitor* PeriodicTask_lock; // protects the periodic task structure extern Monitor* RedefineClasses_lock; // locks classes from parallel redefinition -extern Monitor* HeapMonitor_lock; // protects internal storage in HeapMonitoring +extern Monitor* HeapMonitor_lock; // protects HeapMonitor initialize and stop calls +extern Monitor* HeapMonitorStorage_lock; // protects internal storage in HeapMonitoring #if INCLUDE_TRACE extern Mutex* JfrStacktrace_lock; // used to guard access to the JFR stacktrace table diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java @@ -55,6 +55,7 @@ public native static void disableSampling(); public native static boolean areSamplingStatisticsZero(); + public native static boolean statsHaveExpectedNumberSamples(int expected, int percentError); /** Do the frames provided exist in live, recent garbage, and frequent garbage. 
*/ public native static boolean framesExistEverywhere(Frame[] frames); @@ -70,11 +71,11 @@ List frames = new ArrayList(); if (depth > 1) { createStackDepth(depth - 1, frames); - frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 72)); + frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 73)); } else { actuallyAllocate(frames); - frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 118)); - frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 75)); + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 119)); + frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 76)); } return frames; } @@ -88,17 +89,17 @@ int sum = 0; List frames = new ArrayList(); allocate(frames); - frames.add(new Frame("allocate", "()Ljava/util/List;", "HeapMonitor.java", 90)); + frames.add(new Frame("allocate", "()Ljava/util/List;", "HeapMonitor.java", 91)); return frames; } private static void createStackDepth(int depth, List frames) { if (depth > 1) { createStackDepth(depth - 1, frames); - frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 97)); + frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 98)); } else { allocate(frames); - frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 100)); + frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 101)); } } @@ -107,8 +108,8 @@ for (int j = 0; j < 1000; j++) { sum += actuallyAllocate(frames); } - frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 118)); - frames.add(new Frame("allocate", "(Ljava/util/List;)V", "HeapMonitor.java", 108)); + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 119)); + frames.add(new Frame("allocate", "(Ljava/util/List;)V", "HeapMonitor.java", 109)); } private static 
int actuallyAllocate(List frames) { diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatArrayCorrectnessTest.java rename from test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java rename to test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatArrayCorrectnessTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatArrayCorrectnessTest.java @@ -27,18 +27,16 @@ * @test * @build Frame HeapMonitor * @summary Verifies the JVMTI Heap Monitor Statistics - * @compile HeapMonitorStatCorrectnessTest.java - * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatCorrectnessTest + * @compile HeapMonitorStatArrayCorrectnessTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatArrayCorrectnessTest */ -public class HeapMonitorStatCorrectnessTest { +public class HeapMonitorStatArrayCorrectnessTest { // Do 100000 iterations and expect maxIteration / multiplier samples. private static final int maxIteration = 100000; private static int array[]; - private native static boolean statsHaveExpectedNumberSamples(int expected, int percentError); - private static void allocate(int size) { for (int j = 0; j < maxIteration; j++) { array = new int[size]; @@ -76,7 +74,7 @@ expected /= samplingMultiplier; // 10% error ensures a sanity test without becoming flaky. 
- if (!statsHaveExpectedNumberSamples((int) expected, 10)) { + if (!HeapMonitor.statsHaveExpectedNumberSamples((int) expected, 10)) { throw new RuntimeException("Statistics should show about " + expected + " samples."); } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessTest.java copy from test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java copy to test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatCorrectnessTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessTest.java @@ -27,60 +27,88 @@ * @test * @build Frame HeapMonitor * @summary Verifies the JVMTI Heap Monitor Statistics - * @compile HeapMonitorStatCorrectnessTest.java - * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatCorrectnessTest + * @compile HeapMonitorStatObjectCorrectnessTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorStatObjectCorrectnessTest */ -public class HeapMonitorStatCorrectnessTest { +/** This test is checking the object allocation path works with heap sampling. */ +public class HeapMonitorStatObjectCorrectnessTest { // Do 100000 iterations and expect maxIteration / multiplier samples. 
private static final int maxIteration = 100000; - private static int array[]; + private static BigObject obj; private native static boolean statsHaveExpectedNumberSamples(int expected, int percentError); - private static void allocate(int size) { + private static void allocate() { for (int j = 0; j < maxIteration; j++) { - array = new int[size]; + obj = new BigObject(); } } public static void main(String[] args) { - int sizes[] = {1000, 10000, 100000}; - - for (int currentSize : sizes) { - System.out.println("Testing size " + currentSize); - - // 111 is as good a number as any. - final int samplingMultiplier = 111; - HeapMonitor.enableSampling(samplingMultiplier * currentSize); - - if (!HeapMonitor.areSamplingStatisticsZero()) { - throw new RuntimeException("Statistics should be null to begin with."); - } - - allocate(currentSize); + final int sizeObject = 1400; - // For simplifications, we ignore the array memory usage for array internals (with the array - // sizes requested, it should be a negligible oversight). - // - // That means that with maxIterations, the loop in the method allocate requests: - // maxIterations * currentSize * 4 bytes (4 for integers) - // - // Via the enable sampling, the code requests a sample every samplingMultiplier * currentSize bytes. - // - // Therefore, the expected sample number is: - // (maxIterations * currentSize * 4) / (samplingMultiplier * currentSize); - double expected = maxIteration; - expected *= 4; - expected /= samplingMultiplier; + // 111 is as good a number as any. + final int samplingMultiplier = 111; + HeapMonitor.enableSampling(samplingMultiplier * sizeObject); - // 10% error ensures a sanity test without becoming flaky. 
- if (!statsHaveExpectedNumberSamples((int) expected, 10)) { - throw new RuntimeException("Statistics should show about " + expected + " samples."); - } + if (!HeapMonitor.areSamplingStatisticsZero()) { + throw new RuntimeException("Statistics should be null to begin with."); + } - HeapMonitor.disableSampling(); + allocate(); + + // For simplifications, the code is allocating: + // (BigObject size) * maxIteration. + // + // We ignore the class memory usage apart from field memory usage for BigObject. BigObject + // allocates 175 long, so 1400 bytes, so whatever is used for the class is negligible. + // + // That means that with maxIterations, the loop in the method allocate requests: + // maxIterations * 1400 bytes. + // + // Via the enable sampling, the code requests a sample every samplingMultiplier * sizeObject bytes. + // + // Therefore, the expected sample number is: + // (maxIterations * sizeObject) / (samplingMultiplier * sizeObject); + // + // Which becomes: + // maxIterations / samplingMultiplier + double expected = maxIteration; + expected /= samplingMultiplier; + + // 10% error ensures a sanity test without becoming flaky. + if (!HeapMonitor.statsHaveExpectedNumberSamples((int) expected, 10)) { + throw new RuntimeException("Statistics should show about " + expected + " samples."); + } + + HeapMonitor.disableSampling(); + } + + /** + * Big class on purpose to just be able to ignore the class memory space overhead. + * + * Class contains 175 long fields, so 175 * 8 = 1400 bytes. 
+ */ + private static class BigObject { + private long a0_0, a0_1, a0_2, a0_3, a0_4, a0_5, a0_6, a0_7, a0_8, a0_9; + private long a1_0, a1_1, a1_2, a1_3, a1_4, a1_5, a1_6, a1_7, a1_8, a1_9; + private long a2_0, a2_1, a2_2, a2_3, a2_4, a2_5, a2_6, a2_7, a2_8, a2_9; + private long a3_0, a3_1, a3_2, a3_3, a3_4, a3_5, a3_6, a3_7, a3_8, a3_9; + private long a4_0, a4_1, a4_2, a4_3, a4_4, a4_5, a4_6, a4_7, a4_8, a4_9; + private long a5_0, a5_1, a5_2, a5_3, a5_4, a5_5, a5_6, a5_7, a5_8, a5_9; + private long a6_0, a6_1, a6_2, a6_3, a6_4, a6_5, a6_6, a6_7, a6_8, a6_9; + private long a7_0, a7_1, a7_2, a7_3, a7_4, a7_5, a7_6, a7_7, a7_8, a7_9; + private long a8_0, a8_1, a8_2, a8_3, a8_4, a8_5, a8_6, a8_7, a8_8, a8_9; + private long a9_0, a9_1, a9_2, a9_3, a9_4, a9_5, a9_6, a9_7, a9_8, a9_9; + private long a10_0, a10_1, a10_2, a10_3, a10_4, a10_5, a10_6, a10_7, a10_8, a10_9; + private long a11_0, a11_1, a11_2, a11_3, a11_4, a11_5, a11_6, a11_7, a11_8, a11_9; + private long a12_0, a12_1, a12_2, a12_3, a12_4, a12_5, a12_6, a12_7, a12_8, a12_9; + private long a13_0, a13_1, a13_2, a13_3, a13_4, a13_5, a13_6, a13_7, a13_8, a13_9; + private long a14_0, a14_1, a14_2, a14_3, a14_4, a14_5, a14_6, a14_7, a14_8, a14_9; + private long a15_0, a15_1, a15_2, a15_3, a15_4, a15_5, a15_6, a15_7, a15_8, a15_9; + private long a16_0, a16_1, a16_2, a16_3, a16_4, a16_5, a16_6, a16_7, a16_8, a16_9; + private long a17_0, a17_1, a17_2, a17_3, a17_4; } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -44,7 +44,7 @@ #define TRUE 1 #define FALSE 0 -#define PRINT_OUT 0 +#define PRINT_OUT 1 #define MAX_TRACES 400 static const char *EXC_CNAME = "java/lang/Exception"; @@ -560,14 +560,16 @@ } JNIEXPORT jboolean JNICALL 
-Java_MyPackage_HeapMonitorStatCorrectnessTest_statsHaveExpectedNumberSamples(JNIEnv *env, - jclass cls, - int expected, - int percent_error) { +Java_MyPackage_HeapMonitor_statsHaveExpectedNumberSamples(JNIEnv *env, + jclass cls, + int expected, + int percent_error) { jvmtiHeapSamplingStats stats; check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), "Heap Sampling Statistics"); + fprintf(stderr, "Sample data count %ld, rate %ld, rate count %ld\n", + stats.sample_count, stats.sample_rate_accumulation, stats.sample_rate_count); double diff_ratio = (stats.sample_count - expected); diff_ratio = (diff_ratio < 0) ? -diff_ratio : diff_ratio; diff_ratio /= expected; # HG changeset patch # User jcbeyler # Date 1515105900 28800 # Thu Jan 04 14:45:00 2018 -0800 # Node ID f2d63277a6a8eef2282992f0d3e73d23d12cef7e # Parent 15ff781c423647274c1894543683bd198d520d76 [mq]: heap_port diff --git a/src/hotspot/share/gc/shared/weakProcessor.cpp b/src/hotspot/share/gc/shared/weakProcessor.cpp --- a/src/hotspot/share/gc/shared/weakProcessor.cpp +++ b/src/hotspot/share/gc/shared/weakProcessor.cpp @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "gc/shared/weakProcessor.hpp" #include "prims/jvmtiExport.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/jniHandles.hpp" #include "trace/tracing.hpp" #include "trace/traceMacros.hpp" @@ -32,6 +33,8 @@ void WeakProcessor::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive) { JNIHandles::weak_oops_do(is_alive, keep_alive); JvmtiExport::weak_oops_do(is_alive, keep_alive); + + HeapMonitoring::weak_oops_do(is_alive, keep_alive); TRACE_WEAK_OOPS_DO(is_alive, keep_alive); } # HG changeset patch # User jcbeyler # Date 1515624802 28800 # Wed Jan 10 14:53:22 2018 -0800 # Node ID 64e085c88245a83e3dcc8aca9647aba72caf9cd9 # Parent f2d63277a6a8eef2282992f0d3e73d23d12cef7e [mq]: heap16 diff --git a/src/hotspot/share/gc/shared/weakProcessor.cpp b/src/hotspot/share/gc/shared/weakProcessor.cpp --- 
a/src/hotspot/share/gc/shared/weakProcessor.cpp +++ b/src/hotspot/share/gc/shared/weakProcessor.cpp @@ -34,7 +34,9 @@ JNIHandles::weak_oops_do(is_alive, keep_alive); JvmtiExport::weak_oops_do(is_alive, keep_alive); - HeapMonitoring::weak_oops_do(is_alive, keep_alive); + if (HeapMonitoring::ever_enabled()) { + HeapMonitoring::weak_oops_do(is_alive, keep_alive); + } TRACE_WEAK_OOPS_DO(is_alive, keep_alive); } diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -295,6 +295,7 @@ // Statics for Sampler double HeapMonitoring::_log_table[1 << FastLogNumBits]; bool HeapMonitoring::_enabled; +bool HeapMonitoring::_ever_enabled; jint HeapMonitoring::_monitoring_rate; // Cheap random number generator @@ -589,6 +590,7 @@ StackTraceStorage::storage()->initialize(max_gc_storage); _enabled = true; + _ever_enabled = true; } void HeapMonitoring::stop_profiling() { diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -35,6 +35,7 @@ static uint64_t _rnd; static jint _monitoring_rate; static bool _enabled; + static bool _ever_enabled; // Statics for the fast log static const int FastLogNumBits = 10; @@ -101,6 +102,10 @@ static bool enabled() { return _enabled; } + + static bool ever_enabled() { + return _enabled; + } }; #endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatRateTest.java @@ -52,8 +52,8 @@ double 
errorPercentage = error / rate * 100; - if (errorPercentage > 5) { - throw new RuntimeException("Rate average over 5% for rate " + rate + " -> " + calculatedRate); + if (errorPercentage > 10) { + throw new RuntimeException("Rate average over 10% for rate " + rate + " -> " + calculatedRate); } } } # HG changeset patch # User jcbeyler # Date 1516751651 28800 # Tue Jan 23 15:54:11 2018 -0800 # Node ID b65f68ddca73dcba0239c4e8ce289c5981f5c349 # Parent 64e085c88245a83e3dcc8aca9647aba72caf9cd9 [mq]: heap17 diff --git a/src/hotspot/share/gc/shared/weakProcessor.cpp b/src/hotspot/share/gc/shared/weakProcessor.cpp --- a/src/hotspot/share/gc/shared/weakProcessor.cpp +++ b/src/hotspot/share/gc/shared/weakProcessor.cpp @@ -34,7 +34,7 @@ JNIHandles::weak_oops_do(is_alive, keep_alive); JvmtiExport::weak_oops_do(is_alive, keep_alive); - if (HeapMonitoring::ever_enabled()) { + if (HeapMonitoring::enabled()) { HeapMonitoring::weak_oops_do(is_alive, keep_alive); } TRACE_WEAK_OOPS_DO(is_alive, keep_alive); diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ b/src/hotspot/share/prims/jvmti.xml @@ -11627,10 +11627,7 @@ rate requested and will fill internal data structures with heap allocation samples. The samples are obtained via the , , , - functions. - - Starting the heap sampler resets internal traces and counters. Therefore stopping the sampler - puts internal trace samples and counters on pause for post-processing. + or functions. new @@ -11667,10 +11664,12 @@ Stop the heap sampler in the JVM. Any sample obtained during sampling is still available via the , , , - functions. - - Starting the heap sampler resets internal traces and counters. Therefore stopping the sampler - puts internal trace samples and counters on pause for post-processing. + or functions. + + Stopping the heap sampler resets internal traces and counters. 
Therefore stopping the sampler frees any + internal trace samples; any subsequent call to the , + , , + or functions will return no traces. new @@ -11688,8 +11687,12 @@ Get Live Heap Sampled traces. The fields of the structure are filled in with details of the specified sampled allocation. - This method can be called at any time but if the sampler has not been started via at least - one call to it returns no traces. + This method calls a full GC and can be costly. Use with care as it can affect performance. For + continuous profiling, perhaps prefer GetCachedTraces, which returns the live traces at the last + full GC point. + + This method can be called at any time but if the sampler is not enabled, via + , it returns no traces. new @@ -11713,8 +11716,8 @@ Get the recent garbage heap sampled traces. The fields of the structure are filled in with details of the specified sampled allocation. - This method can be called at any time but if the sampler has not been started via at least - one call to it returns no traces. + This method can be called at any time but if the sampler is not enabled, via + , it returns no traces. new @@ -11738,8 +11741,8 @@ Get the frequent garbage heap sampled traces. The fields of the structure are filled in with details of the specified sampled allocation. - This method can be called at any time but if the sampler has not been started via at least - one call to it returns no traces. + This method can be called at any time but if the sampler is not enabled, via + , it returns no traces. new @@ -11757,7 +11760,33 @@ - + + Get Cached Traces + + Get the cached sampled traces: the traces are the ones that were collected during the last + full GC. The fields of the structure are filled in with + details of the specified sampled allocation. + + This method can be called at any time but if the sampler is not enabled, via + , it returns no traces. + + new + + + + + + jvmtiStackTraces + + The stack trace data structure to be filled. 
+ + + + + + + + Release traces provided by the heap monitoring Release traces provided by any of the trace retrieval methods. @@ -11778,7 +11807,7 @@ - + Get the heap sampling statistics Returns a to understand the heap sampling behavior and current diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -2026,9 +2026,10 @@ return JVMTI_ERROR_NONE; } /* end StopHeapSampling */ -// Get the currently live sampled allocations. +// Provoke a GC and get the currently live sampled allocations. jvmtiError JvmtiEnv::GetLiveTraces(jvmtiStackTraces* stack_traces) { + ForceGarbageCollection(); HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; @@ -2038,7 +2039,7 @@ return JVMTI_ERROR_NONE; } /* end GetLiveTraces */ -// Get the currently live sampled allocations. +// Get the recently garbage collected allocations. jvmtiError JvmtiEnv::GetGarbageTraces(jvmtiStackTraces* stack_traces) { HeapThreadTransition htt(Thread::current()); @@ -2050,7 +2051,7 @@ return JVMTI_ERROR_NONE; } /* end GetGarbageTraces */ -// Get the currently live sampled allocations. +// Get the frequently garbage collected traces. jvmtiError JvmtiEnv::GetFrequentGarbageTraces(jvmtiStackTraces* stack_traces) { HeapThreadTransition htt(Thread::current()); @@ -2062,6 +2063,18 @@ return JVMTI_ERROR_NONE; } /* end GetFrequentGarbageTraces */ +// Get the traces that were garbage collected in the last full GC. +jvmtiError +JvmtiEnv::GetCachedTraces(jvmtiStackTraces* stack_traces) { + HeapThreadTransition htt(Thread::current()); + if (stack_traces == NULL) { + return JVMTI_ERROR_ILLEGAL_ARGUMENT; + } + + HeapMonitoring::get_cached_traces(stack_traces); + return JVMTI_ERROR_NONE; +} /* end GetCachedTraces */ + // Release sampled traces. 
jvmtiError JvmtiEnv::ReleaseTraces(jvmtiStackTraces* stack_traces) { diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -196,6 +196,10 @@ // of stack traces. Passes a jvmtiStackTraces which will get mutated. void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces); + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_cached_stack_traces(jvmtiStackTraces *traces); + // Executes whenever weak references are traversed. is_alive tells // you if the given oop is still reachable and live. void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); @@ -210,11 +214,15 @@ void initialize(int max_storage) { MutexLocker mu(HeapMonitorStorage_lock); - free_storage(); allocate_storage(max_storage); memset(&_stats, 0, sizeof(_stats)); } + void stop() { + MutexLocker mu(HeapMonitorStorage_lock); + free_storage(); + } + const jvmtiHeapSamplingStats& get_heap_sampling_stats() const { return _stats; } @@ -230,6 +238,9 @@ // The traces currently sampled. GrowableArray *_allocated_traces; + // The traces currently sampled. + GrowableArray *_traces_on_last_full_gc; + // Recent garbage traces. 
MostRecentGarbageTraces *_recent_garbage_traces; @@ -287,6 +298,8 @@ void free_garbage(); void free_storage(); + void reset(); + void allocate_storage(int max_gc_storage); }; @@ -295,18 +308,22 @@ // Statics for Sampler double HeapMonitoring::_log_table[1 << FastLogNumBits]; bool HeapMonitoring::_enabled; -bool HeapMonitoring::_ever_enabled; jint HeapMonitoring::_monitoring_rate; // Cheap random number generator uint64_t HeapMonitoring::_rnd; -StackTraceStorage::StackTraceStorage() : - _allocated_traces(NULL), - _recent_garbage_traces(NULL), - _frequent_garbage_traces(NULL), - _max_gc_storage(0), - _initialized(false) { +StackTraceStorage::StackTraceStorage() { + reset(); +} + +void StackTraceStorage::reset() { + _allocated_traces = NULL; + _traces_on_last_full_gc = NULL; + _recent_garbage_traces = NULL; + _frequent_garbage_traces = NULL; + _max_gc_storage = 0; + _initialized = false; } void StackTraceStorage::free_garbage() { @@ -353,12 +370,18 @@ } void StackTraceStorage::free_storage() { + if (!_initialized) { + return; + } + delete _allocated_traces; + delete _traces_on_last_full_gc; free_garbage(); delete _recent_garbage_traces; delete _frequent_garbage_traces; - _initialized = false; + + reset(); } StackTraceStorage::~StackTraceStorage() { @@ -373,6 +396,8 @@ _allocated_traces = new (ResourceObj::C_HEAP, mtInternal) GrowableArray(128, true); + _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal) + GrowableArray(128, true); _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage); _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage); @@ -383,19 +408,26 @@ void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) { MutexLocker mu(HeapMonitorStorage_lock); - StackTraceData new_data(trace, o); - _stats.sample_count++; - _stats.stack_depth_accumulation += trace->frame_count; - _allocated_traces->append(new_data); + // Last minute check on initialization here in case: + // Between the moment object_alloc_do_sample's 
check for initialization + // and now, there was a stop() that deleted the data. + if (_initialized) { + StackTraceData new_data(trace, o); + _stats.sample_count++; + _stats.stack_depth_accumulation += trace->frame_count; + _allocated_traces->append(new_data); + } } void StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, OopClosure *f) { MutexLocker mu(HeapMonitorStorage_lock); size_t count = 0; - if (initialized()) { + if (_initialized) { int len = _allocated_traces->length(); + _traces_on_last_full_gc->clear(); + // Compact the oop traces. Moves the live oops to the beginning of the // growable array, potentially overwriting the dead ones. int curr_pos = 0; @@ -410,6 +442,9 @@ // Copy the old trace, if it is still live. _allocated_traces->at_put(curr_pos++, trace); + // Store the live trace in a cache, to be served up on /heapz. + _traces_on_last_full_gc->append(trace); + count++; } else { // If the old trace is no longer live, add it to the list of @@ -454,12 +489,24 @@ // (because we could be replacing them as the user handles them). // The array is secretly null-terminated (to make it easier to reclaim). 
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) { + if (!_allocated_traces) { + traces->stack_traces = NULL; + traces->trace_count = 0; + return; + } + LiveStackTraceDataCopier copier(_allocated_traces); copy_stack_traces(copier, traces); } // See comment on get_all_stack_traces void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) { + if (!_recent_garbage_traces) { + traces->stack_traces = NULL; + traces->trace_count = 0; + return; + } + GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(), _recent_garbage_traces->size()); copy_stack_traces(copier, traces); @@ -468,11 +515,28 @@ // See comment on get_all_stack_traces void StackTraceStorage::get_frequent_garbage_stack_traces( jvmtiStackTraces *traces) { + if (!_frequent_garbage_traces) { + traces->stack_traces = NULL; + traces->trace_count = 0; + return; + } + GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(), _frequent_garbage_traces->size()); copy_stack_traces(copier, traces); } +// See comment on get_all_stack_traces +void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces *traces) { + if (!_traces_on_last_full_gc) { + traces->stack_traces = NULL; + traces->trace_count = 0; + return; + } + + LiveStackTraceDataCopier copier(_traces_on_last_full_gc); + copy_stack_traces(copier, traces); +} void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, jvmtiStackTraces *traces) { @@ -543,6 +607,10 @@ StackTraceStorage::storage()->get_garbage_stack_traces(traces); } +void HeapMonitoring::get_cached_traces(jvmtiStackTraces *traces) { + StackTraceStorage::storage()->get_cached_stack_traces(traces); +} + void HeapMonitoring::release_traces(jvmtiStackTraces *traces) { jint trace_count = traces->trace_count; jvmtiStackTrace *stack_traces = traces->stack_traces; @@ -590,11 +658,11 @@ StackTraceStorage::storage()->initialize(max_gc_storage); _enabled = true; - _ever_enabled = true; } void 
HeapMonitoring::stop_profiling() { MutexLocker mu(HeapMonitor_lock); + StackTraceStorage::storage()->stop(); _enabled = false; } @@ -635,7 +703,6 @@ } void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) { -#if defined(X86) || defined(PPC) JavaThread *thread = static_cast(t); if (StackTraceStorage::storage()->initialized()) { assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); @@ -683,7 +750,4 @@ FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames); FREE_C_HEAP_OBJ(trace); } -#else - Unimplemented(); -#endif } diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -35,7 +35,6 @@ static uint64_t _rnd; static jint _monitoring_rate; static bool _enabled; - static bool _ever_enabled; // Statics for the fast log static const int FastLogNumBits = 10; @@ -83,6 +82,7 @@ static void get_live_traces(jvmtiStackTraces* stack_traces); static void get_garbage_traces(jvmtiStackTraces* stack_traces); static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces); + static void get_cached_traces(jvmtiStackTraces* stack_traces); static void release_traces(jvmtiStackTraces *trace_info); static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); @@ -102,10 +102,6 @@ static bool enabled() { return _enabled; } - - static bool ever_enabled() { - return _enabled; - } }; #endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java @@ -38,7 +38,7 @@ } } - private static int g_tmp[]; + private static int arrays[][]; /** Enable heap monitoring sampling given a 
rate and maximum garbage to keep in memory. */ public native static void enableSampling(int rate, int maximumGarbage); @@ -74,7 +74,7 @@ frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 73)); } else { actuallyAllocate(frames); - frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 119)); + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 127)); frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 76)); } return frames; @@ -108,19 +108,32 @@ for (int j = 0; j < 1000; j++) { sum += actuallyAllocate(frames); } - frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 119)); + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 127)); frames.add(new Frame("allocate", "(Ljava/util/List;)V", "HeapMonitor.java", 109)); } private static int actuallyAllocate(List frames) { int sum = 0; - // Let us assume that the array is 24 bytes of memory. - for (int i = 0; i < 127000 / 6; i++) { + + // Let us assume that a 1-element array is 24 bytes of memory and we want + // 2MB allocated. + int iterations = (1 << 19) / 6; + + if (arrays == null) { + arrays = new int[iterations][]; + } + + for (int i = 0; i < iterations; i++) { int tmp[] = new int[1]; - // Force it to be kept. - g_tmp = tmp; - sum += g_tmp[0]; + // Force it to be kept and, at the same time, wipe out any previous data. + arrays[i] = tmp; + sum += arrays[0][0]; } return sum; } + + /** Remove the reference to the global array to free data at the next GC. 
*/ + public static void freeStorage() { + arrays = null; + } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2018, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +import java.util.List; + +/** + * @test + * @summary Verifies if cached and live are the same right after a GC. 
+ * @build Frame HeapMonitor + * @compile HeapMonitorCachedTest.java + * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorCachedTest + */ + +public class HeapMonitorCachedTest { + + private static native boolean cachedAndLiveAreSame(); + private static native void getLiveTracesToForceGc(); + private static native long getCachedHashCode(); + + public static void main(String[] args) { + HeapMonitor.enableSampling(); + List frameList = HeapMonitor.allocate(); + frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorCachedTest.java", 44)); + Frame[] frames = frameList.toArray(new Frame[0]); + + // Check that the data is available while heap sampling is enabled. + boolean status = HeapMonitor.framesExistEverywhere(frames); + if (!status) { + throw new RuntimeException("Failed to find the traces before the wipe out."); + } + + // Check cached & live are the same after a GC. + getLiveTracesToForceGc(); + status = cachedAndLiveAreSame(); + if (!status) { + throw new RuntimeException("Cached frames and live frames are not the same."); + } + + // Allocate some more and then free it all, the cache hash code should remain the same. + long cacheHashCode = getCachedHashCode(); + HeapMonitor.allocate(); + // Free the memory entirely. + HeapMonitor.freeStorage(); + long secondCacheHashCode = getCachedHashCode(); + + if (cacheHashCode != secondCacheHashCode) { + throw new RuntimeException("Cached hash code changed."); + } + + // Check cached & live are not the same: cached will still have the old items and live, by + // provoking a GC, will have nothing left. + status = cachedAndLiveAreSame(); + if (status) { + throw new RuntimeException("Cached frames and live frames are still the same."); + } + + // Disabling the sampling should wipe everything out. 
+ HeapMonitor.disableSampling(); + } +} diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java @@ -27,7 +27,7 @@ /** * @test - * @summary Verifies if turning on/off/on the monitor wipes out the information. + * @summary Verifies if turning off the monitor wipes out the information. * @build Frame HeapMonitor * @compile HeapMonitorOnOffTest.java * @run main/othervm/native -agentlib:HeapMonitor MyPackage.HeapMonitorOnOffTest @@ -39,16 +39,15 @@ List frameList = HeapMonitor.allocate(); frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorOnOffTest.java", 39)); Frame[] frames = frameList.toArray(new Frame[0]); - HeapMonitor.disableSampling(); - // Check that the data is still there: this allows to peruse samples after profiling. + // Check that the data is available while heap sampling is enabled. boolean status = HeapMonitor.framesExistEverywhere(frames); if (!status) { throw new RuntimeException("Failed to find the traces before the wipe out."); } - // Enabling the sampling should wipe everything out. - HeapMonitor.enableSampling(); + // Disabling the sampling should wipe everything out. 
+ HeapMonitor.disableSampling(); status = HeapMonitor.framesExistNowhere(frames); if (!status) { diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java @@ -48,7 +48,6 @@ HeapMonitor.enableSampling(); HeapMonitor.allocate(depth); - HeapMonitor.disableSampling(); // baseDepth represents the helper method depth: main, finalWrapper, and helper. // To get the requested depth, remove this from the count. @@ -62,15 +61,17 @@ } } + HeapMonitor.disableSampling(); + // Last test is 1024, which is the current maximum. HeapMonitor.enableSampling(); final int maximumDepth = 1024; HeapMonitor.allocate(1024); - HeapMonitor.disableSampling(); // Because of the extra frames, we should be at (maximumDepth + a few frames). Due to the // maximum depth allowed, we hit it and so should still be at an average of 1024. double averageDepth = getAverageStackDepth(); double errorPercentage = calculateErrorPercentage(maximumDepth, averageDepth); + HeapMonitor.disableSampling(); // 1% error should be close enough. 
if (errorPercentage > 1) { diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorTest.java @@ -35,14 +35,22 @@ public class HeapMonitorTest { + private static native boolean framesAreNotLive(Frame[] frames); + public static void main(String[] args) { HeapMonitor.enableSampling(); List frameList = HeapMonitor.allocate(); - frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorTest.java", 40)); + frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorTest.java", 42)); - boolean status = HeapMonitor.framesExistEverywhere(frameList.toArray(new Frame[0])); + Frame[] frames = frameList.toArray(new Frame[0]); + boolean status = HeapMonitor.framesExistEverywhere(frames); if (!status) { throw new RuntimeException("Non-zero status returned from the agent: " + status); } + + // By freeing the storage and looking at live objects, the expected frames should no longer be + // there since looking at live objects provokes a full GC. 
+ HeapMonitor.freeStorage(); + status = framesAreNotLive(frames); if (!status) { throw new RuntimeException("Some frames are still live after the full GC"); } } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -423,11 +423,13 @@ } static jboolean checkAll(JNIEnv *env, jobjectArray frames, int print_out) { + fprintf(stderr, "Checking all!!!\n"); return check_and(env, frames, 1, 1, 1, print_out); } static jboolean checkNone(JNIEnv *env, jobjectArray frames, int print_out) { + fprintf(stderr, "Checking none!!!\n"); jobject loader = NULL; if (frames == NULL) { @@ -692,6 +694,132 @@ return TRUE; } +JNIEXPORT void JNICALL +Java_MyPackage_HeapMonitorCachedTest_getLiveTracesToForceGc(JNIEnv *env, + jclass cls) { + jvmtiStackTraces live_traces; + jvmtiError error = (*jvmti)->GetLiveTraces(jvmti, &live_traces); + + if (error != JVMTI_ERROR_NONE) { + return; + } + + (*jvmti)->ReleaseTraces(jvmti, &live_traces); +} + +static jboolean compare_traces(jvmtiStackTraces* traces, + jvmtiStackTraces* other_traces, + int print_out_comparisons) { + int trace_count = traces->trace_count; + if (trace_count != other_traces->trace_count) { + return FALSE; + } + + int i; + for (i = 0; i < trace_count; i++) { + jvmtiStackTrace* trace = traces->stack_traces + i; + jvmtiStackTrace* other_trace = other_traces->stack_traces + i; + + if (trace->frame_count != other_trace->frame_count) { + return FALSE; + } + + if (trace->size != other_trace->size) { + return FALSE; + } + + if (trace->thread_id != other_trace->thread_id) { + return FALSE; + } + + jvmtiFrameInfo* frames = trace->frames; + jvmtiFrameInfo* other_frames = other_trace->frames; + if (memcmp(frames, other_frames, sizeof(*frames) * trace->frame_count)) { + return FALSE; + } + } + + return TRUE; +} + +JNIEXPORT jboolean JNICALL 
+Java_MyPackage_HeapMonitorCachedTest_cachedAndLiveAreSame(JNIEnv *env, + jclass cls) { + // Get cached first, then get live (since live performs a GC). + jvmtiStackTraces cached_traces; + jvmtiError error = (*jvmti)->GetCachedTraces(jvmti, &cached_traces); + + if (error != JVMTI_ERROR_NONE) { + return FALSE; + } + + jvmtiStackTraces live_traces; + error = (*jvmti)->GetLiveTraces(jvmti, &live_traces); + + if (error != JVMTI_ERROR_NONE) { + return FALSE; + } + + int result = compare_traces(&cached_traces, &live_traces, PRINT_OUT); + + (*jvmti)->ReleaseTraces(jvmti, &cached_traces); + (*jvmti)->ReleaseTraces(jvmti, &live_traces); + return result; +} + +static long hash(long hash_code, long value) { + return hash_code * 31 + value; +} + +static long get_hash_code(jvmtiStackTraces* traces) { + int trace_count = traces->trace_count; + long hash_code = 17; + + int i; + hash_code = hash(hash_code, trace_count); + for (i = 0; i < trace_count; i++) { + jvmtiStackTrace* trace = traces->stack_traces + i; + hash_code = hash(hash_code, trace->frame_count); + hash_code = hash(hash_code, trace->size); + hash_code = hash(hash_code, trace->thread_id); + + int j; + int frame_count = trace->frame_count; + jvmtiFrameInfo* frames = trace->frames; + hash_code = hash(hash_code, frame_count); + for (j = 0; j < frame_count; j++) { + hash_code = hash(hash_code, (long) frames[j].method); + hash_code = hash(hash_code, frames[j].location); + } + } + + return hash_code; +} + +JNIEXPORT jlong JNICALL +Java_MyPackage_HeapMonitorCachedTest_getCachedHashCode(JNIEnv *env, + jclass cls) { + // Get cached first, then get live. 
+ jvmtiStackTraces cached_traces; + jvmtiError error = (*jvmti)->GetCachedTraces(jvmti, &cached_traces); + + if (error != JVMTI_ERROR_NONE) { + return 0; + } + + long hash_code = get_hash_code(&cached_traces); + (*jvmti)->ReleaseTraces(jvmti, &cached_traces); + + return hash_code; +} + +JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitorTest_framesAreNotLive(JNIEnv *env, + jclass cls, + jobjectArray frames) { + return !check_and(env, frames, FALSE, FALSE, TRUE, PRINT_OUT); +} + #ifdef __cplusplus } #endif # HG changeset patch # User jcbeyler # Date 1516943647 28800 # Thu Jan 25 21:14:07 2018 -0800 # Node ID c16a9b1ae4bdd53e50e84dc4706ed507ef7956d6 # Parent b65f68ddca73dcba0239c4e8ce289c5981f5c349 [mq]: heap17 diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -315,7 +315,7 @@ HeapWord* obj = NULL; if (should_sample) { // Remember the tlab end to fix up the sampling rate. - HeapWord *tlab_old_end = thread->tlab().end(); + HeapWord* tlab_old_end = thread->tlab().end(); obj = allocate_sampled_object(thread, size); // If we did allocate in this tlab, sample it. Otherwise, we wait for the diff --git a/src/hotspot/share/prims/jvmtiHeapTransition.hpp b/src/hotspot/share/prims/jvmtiHeapTransition.hpp --- a/src/hotspot/share/prims/jvmtiHeapTransition.hpp +++ b/src/hotspot/share/prims/jvmtiHeapTransition.hpp @@ -31,13 +31,13 @@ class HeapThreadTransition : StackObj { private: JavaThreadState _saved_state; - JavaThread *_jthread; + JavaThread* _jthread; public: // Transitions this thread from the agent (thread_in_native) to the VM. 
- HeapThreadTransition(Thread *thread) { + HeapThreadTransition(Thread* thread) { if (thread->is_Java_thread()) { - _jthread = static_cast(thread); + _jthread = static_cast(thread); _saved_state = _jthread->thread_state(); if (_saved_state == _thread_in_native) { ThreadStateTransition::transition_from_native(_jthread, _thread_in_vm); diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -31,21 +31,20 @@ static const int MaxStackDepth = 1024; -// Internal data structure representing traces. +// Internal data structure representing traces, used when object has been GC'd. struct StackTraceData : CHeapObj { - jvmtiStackTrace *trace; - oop obj; + jvmtiStackTrace* trace; int references; - StackTraceData(jvmtiStackTrace *t, oop o) : trace(t), obj(o), references(0) {} + StackTraceData(jvmtiStackTrace* t) : trace(t), references(0) {} - StackTraceData() : trace(NULL), obj(NULL), references(0) {} + StackTraceData() : trace(NULL), references(0) {} // StackTraceDatas are shared around the board between various lists. So // handle this by hand instead of having this in the destructor. There are // cases where the struct is on the stack but holding heap data not to be // freed. - static void free_data(StackTraceData *data) { + static void free_data(StackTraceData* data) { if (data->trace != NULL) { FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames); FREE_C_HEAP_OBJ(data->trace); @@ -54,6 +53,17 @@ } }; +// Internal data structure representing traces with the oop, used while object +// is live. Since this structure just passes the trace to the GC lists, it does +// not handle any freeing. 
+struct StackTraceDataWithOop : public StackTraceData { + oop obj; + + StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t), obj(o) {} + + StackTraceDataWithOop() : StackTraceData(), obj(NULL) {} +}; + // Fixed size buffer for holding garbage traces. class GarbageTracesBuffer : public CHeapObj { public: @@ -72,13 +82,13 @@ return _garbage_traces; } - bool store_trace(StackTraceData *trace) { + bool store_trace(StackTraceData* trace) { uint32_t index; if (!select_replacement(&index)) { return false; } - StackTraceData *old_data = _garbage_traces[index]; + StackTraceData* old_data = _garbage_traces[index]; if (old_data != NULL) { old_data->references--; @@ -101,13 +111,13 @@ // Subclasses select the trace to replace. Returns false if no replacement // is to happen, otherwise stores the index of the trace to replace in // *index. - virtual bool select_replacement(uint32_t *index) = 0; + virtual bool select_replacement(uint32_t* index) = 0; const uint32_t _size; private: // The current garbage traces. A fixed-size ring buffer. - StackTraceData **_garbage_traces; + StackTraceData** _garbage_traces; }; // Keep statistical sample of traces over the lifetime of the server. @@ -182,27 +192,27 @@ public: // The function that gets called to add a trace to the list of // traces we are maintaining. - void add_trace(jvmtiStackTrace *trace, oop o); - - // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_all_stack_traces(jvmtiStackTraces *traces); + void add_trace(jvmtiStackTrace* trace, oop o); // The function that gets called by the client to retrieve the list // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_garbage_stack_traces(jvmtiStackTraces *traces); + void get_all_stack_traces(jvmtiStackTraces* traces); // The function that gets called by the client to retrieve the list // of stack traces. 
Passes a jvmtiStackTraces which will get mutated. - void get_frequent_garbage_stack_traces(jvmtiStackTraces *traces); + void get_garbage_stack_traces(jvmtiStackTraces* traces); // The function that gets called by the client to retrieve the list // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_cached_stack_traces(jvmtiStackTraces *traces); + void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiStackTraces which will get mutated. + void get_cached_stack_traces(jvmtiStackTraces* traces); // Executes whenever weak references are traversed. is_alive tells // you if the given oop is still reachable and live. - void weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f); + void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f); ~StackTraceStorage(); StackTraceStorage(); @@ -236,16 +246,16 @@ private: // The traces currently sampled. - GrowableArray *_allocated_traces; + GrowableArray* _allocated_traces; // The traces currently sampled. - GrowableArray *_traces_on_last_full_gc; + GrowableArray* _traces_on_last_full_gc; // Recent garbage traces. - MostRecentGarbageTraces *_recent_garbage_traces; + MostRecentGarbageTraces* _recent_garbage_traces; // Frequent garbage traces. - FrequentGarbageTraces *_frequent_garbage_traces; + FrequentGarbageTraces* _frequent_garbage_traces; // Heap Sampling statistics. jvmtiHeapSamplingStats _stats; @@ -261,40 +271,40 @@ class StackTraceDataCopier { public: virtual int size() const = 0; - virtual const StackTraceData *get(uint32_t i) const = 0; + virtual const StackTraceData* get(uint32_t i) const = 0; }; class LiveStackTraceDataCopier : public StackTraceDataCopier { public: - LiveStackTraceDataCopier(GrowableArray *data) : + LiveStackTraceDataCopier(GrowableArray* data) : _data(data) {} int size() const { return _data ? 
_data->length() : 0; } - const StackTraceData *get(uint32_t i) const { return _data->adr_at(i); } + const StackTraceData* get(uint32_t i) const { return _data->adr_at(i); } private: - GrowableArray *_data; + GrowableArray* _data; }; class GarbageStackTraceDataCopier : public StackTraceDataCopier { public: - GarbageStackTraceDataCopier(StackTraceData **data, int size) : + GarbageStackTraceDataCopier(StackTraceData** data, int size) : _data(data), _size(size) {} int size() const { return _size; } - const StackTraceData *get(uint32_t i) const { return _data[i]; } + const StackTraceData* get(uint32_t i) const { return _data[i]; } private: - StackTraceData **_data; + StackTraceData** _data; int _size; }; // Copies from StackTraceData to jvmtiStackTrace. - bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from); + bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from); // Creates a deep copy of the list of StackTraceData. void copy_stack_traces(const StackTraceDataCopier &copier, - jvmtiStackTraces *traces); + jvmtiStackTraces* traces); - void store_garbage_trace(const StackTraceData &trace); + void store_garbage_trace(const StackTraceDataWithOop &trace); void free_garbage(); void free_storage(); @@ -327,10 +337,10 @@ } void StackTraceStorage::free_garbage() { - StackTraceData **recent_garbage = NULL; + StackTraceData** recent_garbage = NULL; uint32_t recent_size = 0; - StackTraceData **frequent_garbage = NULL; + StackTraceData** frequent_garbage = NULL; uint32_t frequent_size = 0; if (_recent_garbage_traces != NULL) { @@ -346,7 +356,7 @@ // Simple solution since this happens at exit. // Go through the recent and remove any that only are referenced there. for (uint32_t i = 0; i < recent_size; i++) { - StackTraceData *trace = recent_garbage[i]; + StackTraceData* trace = recent_garbage[i]; if (trace != NULL) { trace->references--; @@ -358,7 +368,7 @@ // Then go through the frequent and remove those that are now only there. 
for (uint32_t i = 0; i < frequent_size; i++) { - StackTraceData *trace = frequent_garbage[i]; + StackTraceData* trace = frequent_garbage[i]; if (trace != NULL) { trace->references--; @@ -395,9 +405,9 @@ } _allocated_traces = new (ResourceObj::C_HEAP, mtInternal) - GrowableArray(128, true); + GrowableArray(128, true); _traces_on_last_full_gc = new (ResourceObj::C_HEAP, mtInternal) - GrowableArray(128, true); + GrowableArray(128, true); _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage); _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage); @@ -406,21 +416,21 @@ _initialized = true; } -void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) { +void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) { MutexLocker mu(HeapMonitorStorage_lock); // Last minute check on initialization here in case: // Between the moment object_alloc_do_sample's check for initialization // and now, there was a stop() that deleted the data. if (_initialized) { - StackTraceData new_data(trace, o); + StackTraceDataWithOop new_data(trace, o); _stats.sample_count++; _stats.stack_depth_accumulation += trace->frame_count; _allocated_traces->append(new_data); } } -void StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, - OopClosure *f) { +void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive, + OopClosure* f) { MutexLocker mu(HeapMonitorStorage_lock); size_t count = 0; if (_initialized) { @@ -432,10 +442,10 @@ // growable array, potentially overwriting the dead ones. int curr_pos = 0; for (int i = 0; i < len; i++) { - StackTraceData &trace = _allocated_traces->at(i); - oop value = trace.obj; - if (Universe::heap()->is_in_reserved(value) - && is_alive->do_object_b(value)) { + StackTraceDataWithOop &trace = _allocated_traces->at(i); + oop value = RootAccess::oop_load( + &trace.obj); + if (is_alive->do_object_b(value)) { // Update the oop to point to the new object if it is still alive. 
f->do_oop(&(trace.obj)); @@ -455,7 +465,7 @@ // Zero out remaining array elements. Even though the call to trunc_to // below truncates these values, zeroing them out is good practice. - StackTraceData zero_trace; + StackTraceDataWithOop zero_trace; for (int i = curr_pos; i < len; i++) { _allocated_traces->at_put(i, zero_trace); } @@ -467,9 +477,9 @@ log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count); } -bool StackTraceStorage::deep_copy(jvmtiStackTrace *to, - const StackTraceData *from) { - const jvmtiStackTrace *src = from->trace; +bool StackTraceStorage::deep_copy(jvmtiStackTrace* to, + const StackTraceData* from) { + const jvmtiStackTrace* src = from->trace; *to = *src; to->frames = @@ -488,7 +498,7 @@ // Called by the outside world; returns a copy of the stack traces // (because we could be replacing them as the user handles them). // The array is secretly null-terminated (to make it easier to reclaim). -void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces *traces) { +void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) { if (!_allocated_traces) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -500,7 +510,7 @@ } // See comment on get_all_stack_traces -void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces *traces) { +void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) { if (!_recent_garbage_traces) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -514,7 +524,7 @@ // See comment on get_all_stack_traces void StackTraceStorage::get_frequent_garbage_stack_traces( - jvmtiStackTraces *traces) { + jvmtiStackTraces* traces) { if (!_frequent_garbage_traces) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -527,7 +537,7 @@ } // See comment on get_all_stack_traces -void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces *traces) { +void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) { if 
(!_traces_on_last_full_gc) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -539,13 +549,13 @@ } void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, - jvmtiStackTraces *traces) { + jvmtiStackTraces* traces) { MutexLocker mu(HeapMonitorStorage_lock); int len = copier.size(); // Create a new array to store the StackTraceData objects. // + 1 for a NULL at the end. - jvmtiStackTrace *t = + jvmtiStackTrace* t = NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal); if (t == NULL) { traces->stack_traces = NULL; @@ -558,9 +568,9 @@ // Copy the StackTraceData objects into the new array. int trace_count = 0; for (int i = 0; i < len; i++) { - const StackTraceData *stack_trace = copier.get(i); + const StackTraceData* stack_trace = copier.get(i); if (stack_trace != NULL) { - jvmtiStackTrace *to = &t[trace_count]; + jvmtiStackTrace* to = &t[trace_count]; if (!deep_copy(to, stack_trace)) { continue; } @@ -572,9 +582,8 @@ traces->trace_count = trace_count; } -void StackTraceStorage::store_garbage_trace(const StackTraceData &trace) { - StackTraceData *new_trace = new StackTraceData(); - *new_trace = trace; +void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) { + StackTraceData* new_trace = new StackTraceData(trace.trace); bool accepted = _recent_garbage_traces->store_trace(new_trace); @@ -589,34 +598,34 @@ _stats.garbage_collected_samples++; } -void HeapMonitoring::get_live_traces(jvmtiStackTraces *traces) { +void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) { StackTraceStorage::storage()->get_all_stack_traces(traces); } -void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats *stats) { +void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) { const jvmtiHeapSamplingStats& internal_stats = StackTraceStorage::storage()->get_heap_sampling_stats(); *stats = internal_stats; } -void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces *traces) { +void 
HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) { StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces); } -void HeapMonitoring::get_garbage_traces(jvmtiStackTraces *traces) { +void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) { StackTraceStorage::storage()->get_garbage_stack_traces(traces); } -void HeapMonitoring::get_cached_traces(jvmtiStackTraces *traces) { +void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) { StackTraceStorage::storage()->get_cached_stack_traces(traces); } -void HeapMonitoring::release_traces(jvmtiStackTraces *traces) { +void HeapMonitoring::release_traces(jvmtiStackTraces* traces) { jint trace_count = traces->trace_count; - jvmtiStackTrace *stack_traces = traces->stack_traces; + jvmtiStackTrace* stack_traces = traces->stack_traces; for (jint i = 0; i < trace_count; i++) { - jvmtiStackTrace *current_trace = stack_traces + i; + jvmtiStackTrace* current_trace = stack_traces + i; FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames); } @@ -627,7 +636,7 @@ // Invoked by the GC to clean up old stack traces and remove old arrays // of instrumentation that are still lying around. 
-void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure *f) { +void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); StackTraceStorage::storage()->weak_oops_do(is_alive, f); } @@ -650,7 +659,7 @@ _log_table[i] = (log(1.0 + half_way / (1 << FastLogNumBits)) / log(2.0)); } - JavaThread *t = static_cast(Thread::current()); + JavaThread* t = static_cast(Thread::current()); _rnd = static_cast(reinterpret_cast(t)); if (_rnd == 0) { _rnd = 1; @@ -677,7 +686,7 @@ // -log_e(q)/m = x // log_2(q) * (-log_e(2) * 1/m) = x // In the code, q is actually in the range 1 to 2**26, hence the -26 below -void HeapMonitoring::pick_next_sample(size_t *ptr) { +void HeapMonitoring::pick_next_sample(size_t* ptr) { _rnd = next_random(_rnd); // Take the top 26 bits as the random number // (This plus a 1<<58 sampling bound gives a max possible step of @@ -702,18 +711,18 @@ StackTraceStorage::storage()->accumulate_sample_rate(rate); } -void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) { - JavaThread *thread = static_cast(t); +void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) { + JavaThread* thread = static_cast(t); if (StackTraceStorage::storage()->initialized()) { assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); - JavaThread *thread = static_cast(t); + JavaThread* thread = static_cast(t); - jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal); + jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal); if (trace == NULL) { return; } - jvmtiFrameInfo *frames = + jvmtiFrameInfo* frames = NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); if (frames == NULL) { diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ 
b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -41,7 +41,7 @@ static const int FastLogMask = (1 << FastLogNumBits) - 1; static double _log_table[1<length(); # HG changeset patch # User jcbeyler # Date 1517871286 28800 # Mon Feb 05 14:54:46 2018 -0800 # Node ID 4849e7c595bcfb03a600d13525c2f890d6d4a3ac # Parent d381aedc9441d268e3bc2a91f1eb656ee6ac4567 [mq]: heap20 diff --git a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp @@ -158,7 +158,9 @@ AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD); - THREAD->tlab().handle_sample(THREAD, result, size); + if (UseTLAB) { + THREAD->tlab().handle_sample(THREAD, result, size); + } return result; } diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -420,6 +420,7 @@ return _actual_end + alignment_reserve(); } + GlobalTLABStats::GlobalTLABStats() : _allocating_threads_avg(TLABAllocationWeight) { diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ b/src/hotspot/share/prims/jvmti.xml @@ -10354,7 +10354,7 @@ - + Can sample the heap. If this capability is enabled then the heap sampling methods can be called. @@ -11539,7 +11539,7 @@ - + jvmtiFrameInfo @@ -11560,7 +11560,7 @@ - + jvmtiStackTrace @@ -11578,7 +11578,7 @@ - + @@ -11620,7 +11620,7 @@ - + Start Heap Sampling Start the heap sampler in the JVM. The function provides, via its argument, the sampling @@ -11658,7 +11658,7 @@ - + Stop Heap Sampling Stop the heap sampler in the JVM. @@ -11681,7 +11681,7 @@ - + Get Live Traces Get Live Heap Sampled traces. 
The fields of the @@ -11710,7 +11710,7 @@ - + Get Garbage Traces Get the recent garbage heap sampled traces. The fields of the @@ -11735,7 +11735,7 @@ - + Get Frequent Garbage Traces Get the frequent garbage heap sampled traces. The fields of the @@ -11760,7 +11760,7 @@ - + Get Live Traces Get the cached sampled traces: the traces are the ones that were collected during the last @@ -11786,7 +11786,7 @@ - + Release traces provided by the heap monitoring Release traces provided by any of the trace retrieval methods. @@ -11807,7 +11807,7 @@ - + Get the heap sampling statistics Returns a to understand the heap sampling behavior and current diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -27,6 +27,7 @@ #include "gc/shared/collectedHeap.hpp" #include "memory/universe.hpp" #include "runtime/heapMonitoring.hpp" +#include "runtime/orderAccess.inline.hpp" #include "runtime/vframe.hpp" static const int MaxStackDepth = 1024; @@ -59,9 +60,23 @@ struct StackTraceDataWithOop : public StackTraceData { oop obj; - StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t), obj(o) {} + StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) { + store_oop(o); + } StackTraceDataWithOop() : StackTraceData(), obj(NULL) {} + + oop load_oop() { + return RootAccess::oop_load(&obj); + } + + void store_oop(oop value) { + RootAccess::oop_store(&obj, value); + } + + void clear_oop() { + store_oop(reinterpret_cast(NULL)); + } }; // Fixed size buffer for holding garbage traces. @@ -242,7 +257,9 @@ _stats.sample_rate_count++; } - bool initialized() { return _initialized; } + bool initialized() { + return OrderAccess::load_acquire(&_initialized) != 0; + } private: // The traces currently sampled. 
@@ -264,7 +281,7 @@ int _max_gc_storage; static StackTraceStorage* internal_storage; - volatile bool _initialized; + int _initialized; // Support functions and classes for copying data to the external // world. @@ -333,7 +350,7 @@ _recent_garbage_traces = NULL; _frequent_garbage_traces = NULL; _max_gc_storage = 0; - _initialized = false; + OrderAccess::release_store(&_initialized, 0); } void StackTraceStorage::free_garbage() { @@ -380,7 +397,7 @@ } void StackTraceStorage::free_storage() { - if (!_initialized) { + if (!initialized()) { return; } @@ -400,7 +417,7 @@ void StackTraceStorage::allocate_storage(int max_gc_storage) { // In case multiple threads got locked and then 1 by 1 got through. - if (_initialized) { + if (initialized()) { return; } @@ -413,7 +430,7 @@ _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage); _max_gc_storage = max_gc_storage; - _initialized = true; + OrderAccess::release_store(&_initialized, 1); } void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) { @@ -421,7 +438,7 @@ // Last minute check on initialization here in case: // Between the moment object_alloc_do_sample's check for initialization // and now, there was a stop() that deleted the data. - if (_initialized) { + if (initialized()) { StackTraceDataWithOop new_data(trace, o); _stats.sample_count++; _stats.stack_depth_accumulation += trace->frame_count; @@ -432,30 +449,28 @@ void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { size_t count = 0; - if (_initialized) { + if (initialized()) { int len = _allocated_traces->length(); _traces_on_last_full_gc->clear(); // Compact the oop traces. Moves the live oops to the beginning of the // growable array, potentially overwriting the dead ones. 
- int curr_pos = 0; for (int i = 0; i < len; i++) { StackTraceDataWithOop &trace = _allocated_traces->at(i); - oop value = RootAccess::oop_load( - &trace.obj); + oop value = trace.load_oop(); if (is_alive->do_object_b(value)) { // Update the oop to point to the new object if it is still alive. f->do_oop(&(trace.obj)); // Copy the old trace, if it is still live. - _allocated_traces->at_put(curr_pos++, trace); + _allocated_traces->at_put(count++, trace); // Store the live trace in a cache, to be served up on /heapz. _traces_on_last_full_gc->append(trace); + } else { + trace.clear_oop(); - count++; - } else { // If the old trace is no longer live, add it to the list of // recently collected garbage. store_garbage_trace(trace); @@ -465,12 +480,12 @@ // Zero out remaining array elements. Even though the call to trunc_to // below truncates these values, zeroing them out is good practice. StackTraceDataWithOop zero_trace; - for (int i = curr_pos; i < len; i++) { + for (int i = count; i < len; i++) { _allocated_traces->at_put(i, zero_trace); } // Set the array's length to the number of live elements. 
- _allocated_traces->trunc_to(curr_pos); + _allocated_traces->trunc_to(count); } log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count); # HG changeset patch # User jcbeyler # Date 1517985546 28800 # Tue Feb 06 22:39:06 2018 -0800 # Node ID 4c38444f83418ff6c760d4d6c43368df4c1757da # Parent 4849e7c595bcfb03a600d13525c2f890d6d4a3ac [mq]: heap21 diff --git a/src/hotspot/share/prims/jvmtiHeapTransition.hpp b/src/hotspot/share/prims/jvmtiHeapTransition.hpp --- a/src/hotspot/share/prims/jvmtiHeapTransition.hpp +++ b/src/hotspot/share/prims/jvmtiHeapTransition.hpp @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP -#define SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP +#ifndef SHARE_PRIMS_JVMTIHEAPTRANSITION_HPP +#define SHARE_PRIMS_JVMTIHEAPTRANSITION_HPP #include "runtime/interfaceSupport.hpp" @@ -34,7 +34,7 @@ JavaThread* _jthread; public: - // Transitions this thread from the agent (thread_in_native) to the VM. + // Transitions this thread if it is a Java Thread to a _thread_in_vm. HeapThreadTransition(Thread* thread) { if (thread->is_Java_thread()) { _jthread = static_cast(thread); @@ -60,4 +60,4 @@ } }; -#endif // SHARE_VM_PRIMS_JVMTIHEAPSAMPLING_HPP +#endif // SHARE_PRIMS_JVMTIHEAPTRANSITION_HPP diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -33,45 +33,63 @@ static const int MaxStackDepth = 1024; // Internal data structure representing traces, used when object has been GC'd. -struct StackTraceData : CHeapObj { - jvmtiStackTrace* trace; - int references; - - StackTraceData(jvmtiStackTrace* t) : trace(t), references(0) {} - - StackTraceData() : trace(NULL), references(0) {} +class StackTraceData : public CHeapObj { + private: + jvmtiStackTrace* _trace; + int _references; - // StackTraceDatas are shared around the board between various lists. 
So - // handle this by hand instead of having this in the destructor. There are - // cases where the struct is on the stack but holding heap data not to be - // freed. - static void free_data(StackTraceData* data) { - if (data->trace != NULL) { - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->trace->frames); - FREE_C_HEAP_OBJ(data->trace); + public: + StackTraceData(jvmtiStackTrace* t) : _trace(t), _references(0) {} + + void increment_reference_count() { + _references++; + } + + jvmtiStackTrace* get_trace() const { + return _trace; + } + + static void unreference_and_free(StackTraceData* data) { + if (!data) { + return; } - delete data; + + data->_references--; + if (data->_references == 0) { + if (data->_trace != NULL) { + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->_trace->frames); + FREE_C_HEAP_OBJ(data->_trace); + } + delete data; + } } }; // Internal data structure representing traces with the oop, used while object // is live. Since this structure just passes the trace to the GC lists, it does // not handle any freeing. 
-struct StackTraceDataWithOop : public StackTraceData { - oop obj; +class StackTraceDataWithOop : public StackTraceData { + private: + oop _obj; + public: StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) { store_oop(o); } - StackTraceDataWithOop() : StackTraceData(), obj(NULL) {} + StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) { + } oop load_oop() { - return RootAccess::oop_load(&obj); + return RootAccess::oop_load(&_obj); + } + + oop* get_oop_addr() { + return &_obj; } void store_oop(oop value) { - RootAccess::oop_store(&obj, value); + RootAccess::oop_store(&_obj, value); } void clear_oop() { @@ -104,16 +122,9 @@ } StackTraceData* old_data = _garbage_traces[index]; - - if (old_data != NULL) { - old_data->references--; + StackTraceData::unreference_and_free(old_data); - if (old_data->references == 0) { - StackTraceData::free_data(old_data); - } - } - - trace->references++; + trace->increment_reference_count(); _garbage_traces[index] = trace; return true; } @@ -240,7 +251,6 @@ void initialize(int max_storage) { MutexLocker mu(HeapMonitorStorage_lock); allocate_storage(max_storage); - memset(&_stats, 0, sizeof(_stats)); } void stop() { @@ -249,10 +259,12 @@ } const jvmtiHeapSamplingStats& get_heap_sampling_stats() const { + MutexLocker mu(HeapMonitorStorage_lock); return _stats; } void accumulate_sample_rate(size_t rate) { + MutexLocker mu(HeapMonitorStorage_lock); _stats.sample_rate_accumulation += rate; _stats.sample_rate_count++; } @@ -334,7 +346,7 @@ // Statics for Sampler double HeapMonitoring::_log_table[1 << FastLogNumBits]; -bool HeapMonitoring::_enabled; +int HeapMonitoring::_enabled; jint HeapMonitoring::_monitoring_rate; // Cheap random number generator @@ -373,26 +385,12 @@ // Simple solution since this happens at exit. // Go through the recent and remove any that only are referenced there. 
for (uint32_t i = 0; i < recent_size; i++) { - StackTraceData* trace = recent_garbage[i]; - if (trace != NULL) { - trace->references--; - - if (trace->references == 0) { - StackTraceData::free_data(trace); - } - } + StackTraceData::unreference_and_free(recent_garbage[i]); } // Then go through the frequent and remove those that are now only there. for (uint32_t i = 0; i < frequent_size; i++) { - StackTraceData* trace = frequent_garbage[i]; - if (trace != NULL) { - trace->references--; - - if (trace->references == 0) { - StackTraceData::free_data(trace); - } - } + StackTraceData::unreference_and_free(frequent_garbage[i]); } } @@ -412,6 +410,7 @@ } StackTraceStorage::~StackTraceStorage() { + MutexLocker mu(HeapMonitorStorage_lock); free_storage(); } @@ -430,6 +429,7 @@ _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage); _max_gc_storage = max_gc_storage; + memset(&_stats, 0, sizeof(_stats)); OrderAccess::release_store(&_initialized, 1); } @@ -461,7 +461,7 @@ oop value = trace.load_oop(); if (is_alive->do_object_b(value)) { // Update the oop to point to the new object if it is still alive. - f->do_oop(&(trace.obj)); + f->do_oop(trace.get_oop_addr()); // Copy the old trace, if it is still live. _allocated_traces->at_put(count++, trace); @@ -493,7 +493,7 @@ bool StackTraceStorage::deep_copy(jvmtiStackTrace* to, const StackTraceData* from) { - const jvmtiStackTrace* src = from->trace; + const jvmtiStackTrace* src = from->get_trace(); *to = *src; to->frames = @@ -513,6 +513,7 @@ // (because we could be replacing them as the user handles them). // The array is secretly null-terminated (to make it easier to reclaim). 
void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) { + MutexLocker mu(HeapMonitorStorage_lock); if (!_allocated_traces) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -525,6 +526,7 @@ // See comment on get_all_stack_traces void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) { + MutexLocker mu(HeapMonitorStorage_lock); if (!_recent_garbage_traces) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -539,6 +541,7 @@ // See comment on get_all_stack_traces void StackTraceStorage::get_frequent_garbage_stack_traces( jvmtiStackTraces* traces) { + MutexLocker mu(HeapMonitorStorage_lock); if (!_frequent_garbage_traces) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -552,6 +555,7 @@ // See comment on get_all_stack_traces void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) { + MutexLocker mu(HeapMonitorStorage_lock); if (!_traces_on_last_full_gc) { traces->stack_traces = NULL; traces->trace_count = 0; @@ -564,7 +568,6 @@ void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, jvmtiStackTraces* traces) { - MutexLocker mu(HeapMonitorStorage_lock); int len = copier.size(); // Create a new array to store the StackTraceData objects. @@ -597,7 +600,7 @@ } void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) { - StackTraceData* new_trace = new StackTraceData(trace.trace); + StackTraceData* new_trace = new StackTraceData(trace.get_trace()); bool accepted = _recent_garbage_traces->store_trace(new_trace); @@ -659,7 +662,7 @@ jint max_gc_storage) { MutexLocker mu(HeapMonitor_lock); // Ignore if already enabled. 
- if (_enabled) { + if (enabled()) { return; } @@ -680,13 +683,16 @@ } StackTraceStorage::storage()->initialize(max_gc_storage); - _enabled = true; + OrderAccess::release_store(&_enabled, 1); } void HeapMonitoring::stop_profiling() { MutexLocker mu(HeapMonitor_lock); - StackTraceStorage::storage()->stop(); - _enabled = false; + + if (enabled()) { + StackTraceStorage::storage()->stop(); + OrderAccess::release_store(&_enabled, 0); + } } // Generates a geometric variable with the specified mean (512K by default). diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -22,8 +22,8 @@ * */ -#ifndef SHARE_VM_RUNTIME_HEAPMONITORING_HPP -#define SHARE_VM_RUNTIME_HEAPMONITORING_HPP +#ifndef SHARE_RUNTIME_HEAPMONITORING_HPP +#define SHARE_RUNTIME_HEAPMONITORING_HPP #include "gc/shared/referenceProcessor.hpp" #include "runtime/sharedRuntime.hpp" @@ -34,7 +34,7 @@ // Cheap random number generator static uint64_t _rnd; static jint _monitoring_rate; - static bool _enabled; + static int _enabled; // Statics for the fast log static const int FastLogNumBits = 10; @@ -100,8 +100,8 @@ } static bool enabled() { - return _enabled; + return OrderAccess::load_acquire(&_enabled) != 0; } }; -#endif // SHARE_VM_RUNTIME_HEAPMONITORING_HPP +#endif // SHARE_RUNTIME_HEAPMONITORING_HPP diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessCMSTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessCMSTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessCMSTest.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics using CMS GC + * @build Frame HeapMonitor + * @compile HeapMonitorStatObjectCorrectnessTest.java + * @run main/othervm/native -agentlib:HeapMonitor -XX:+UseConcMarkSweepGC MyPackage.HeapMonitorStatObjectCorrectnessTest + */ diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessParallelTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessParallelTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessParallelTest.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics using Parallel GC + * @build Frame HeapMonitor + * @compile HeapMonitorStatObjectCorrectnessTest.java + * @run main/othervm/native -agentlib:HeapMonitor -XX:+UseParallelGC MyPackage.HeapMonitorStatObjectCorrectnessTest + */ diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessSerialTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessSerialTest.java new file mode 100644 --- /dev/null +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessSerialTest.java @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2017, Google and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package MyPackage; + +/** + * @test + * @summary Verifies the JVMTI Heap Monitor Statistics using SerialGC + * @build Frame HeapMonitor + * @compile HeapMonitorStatObjectCorrectnessTest.java + * @run main/othervm/native -agentlib:HeapMonitor -XX:+UseSerialGC MyPackage.HeapMonitorStatObjectCorrectnessTest + */ diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -568,8 +568,6 @@ check_error((*jvmti)->GetHeapSamplingStats(jvmti, &stats), "Heap Sampling Statistics"); - fprintf(stderr, "Sample data count %ld, rate %ld, rate count %ld\n", - stats.sample_count, stats.sample_rate_accumulation, stats.sample_rate_count); double diff_ratio = (stats.sample_count - expected); diff_ratio = (diff_ratio < 0) ? 
-diff_ratio : diff_ratio; diff_ratio /= expected; # HG changeset patch # User jcbeyler # Date 1518110169 28800 # Thu Feb 08 09:16:09 2018 -0800 # Node ID 31a5297861eff27f053157bf32fdd04bbee57dd7 # Parent 4c38444f83418ff6c760d4d6c43368df4c1757da [mq]: heap22 diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitor.java @@ -61,6 +61,8 @@ public native static boolean framesExistEverywhere(Frame[] frames); /** Do the frames provided not exist in live, recent garbage, and frequent garbage. */ public native static boolean framesExistNowhere(Frame[] frames); + /** Do the frames provided exist in live, recent garbage, or frequent garbage. */ + public native static boolean framesExistSomewhere(Frame[] frames); /** * Allocate memory but first create a stack trace of a particular depth. 
@@ -71,11 +73,11 @@ List frames = new ArrayList(); if (depth > 1) { createStackDepth(depth - 1, frames); - frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 73)); + frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 75)); } else { actuallyAllocate(frames); - frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 127)); - frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 76)); + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 138)); + frames.add(new Frame("allocate", "(I)Ljava/util/List;", "HeapMonitor.java", 78)); } return frames; } @@ -89,17 +91,17 @@ int sum = 0; List frames = new ArrayList(); allocate(frames); - frames.add(new Frame("allocate", "()Ljava/util/List;", "HeapMonitor.java", 91)); + frames.add(new Frame("allocate", "()Ljava/util/List;", "HeapMonitor.java", 93)); return frames; } private static void createStackDepth(int depth, List frames) { if (depth > 1) { createStackDepth(depth - 1, frames); - frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 98)); + frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 100)); } else { allocate(frames); - frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 101)); + frames.add(new Frame("createStackDepth", "(ILjava/util/List;)V", "HeapMonitor.java", 103)); } } @@ -108,8 +110,17 @@ for (int j = 0; j < 1000; j++) { sum += actuallyAllocate(frames); } - frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 127)); - frames.add(new Frame("allocate", "(Ljava/util/List;)V", "HeapMonitor.java", 109)); + frames.add(new Frame("actuallyAllocate", "(Ljava/util/List;)I", "HeapMonitor.java", 138)); + frames.add(new Frame("allocate", "(Ljava/util/List;)V", "HeapMonitor.java", 111)); + } + + public static List repeatAllocate(int max) { + List frames = null; + for (int 
i = 0; i < max; i++) { + frames = allocate(); + } + frames.add(new Frame("repeatAllocate", "(I)Ljava/util/List;", "HeapMonitor.java", 120)); + return frames; } private static int actuallyAllocate(List frames) { diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorCachedTest.java @@ -41,12 +41,12 @@ public static void main(String[] args) { HeapMonitor.enableSampling(); - List frameList = HeapMonitor.allocate(); + List frameList = HeapMonitor.repeatAllocate(10); frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorCachedTest.java", 44)); Frame[] frames = frameList.toArray(new Frame[0]); // Check that the data is available while heap sampling is enabled. - boolean status = HeapMonitor.framesExistEverywhere(frames); + boolean status = HeapMonitor.framesExistSomewhere(frames); if (!status) { throw new RuntimeException("Failed to find the traces before the wipe out."); } @@ -60,7 +60,7 @@ // Allocate some more and then free it all, the cache hash code should remain the same. long cacheHashCode = getCachedHashCode(); - HeapMonitor.allocate(); + HeapMonitor.repeatAllocate(10); // Free the memory entirely. 
HeapMonitor.freeStorage(); long secondCacheHashCode = getCachedHashCode(); diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorFrequentTest.java @@ -49,16 +49,18 @@ public static void main(String[] args) { HeapMonitor.enableSampling(); // We are testing for the recent garbage sampler: - // First run for 10000 iterations to fill up the garbage sampler. + // First run for 10 iterations to fill up the garbage sampler. List firstFrameList = runner(10); firstFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorFrequentTest.java", 53)); // Now because we are in a different stack frame line here, we can just re-use the same runner. - // Run for 3, we really should not see that many of these and most should be the first type. + // Run for 10, this would normally replace the garbage history but frequent has a back-off + // mechanism making it less and less likely to populate the history buffer. + // See the HeapMonitorRecentTest to understand the Frequent variation. List secondFrameList = runner(10); secondFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorFrequentTest.java", - 59)); + 61)); // Both types should exist in frequent since it was frequent enough. 
boolean status = framesExistInFrequent(firstFrameList.toArray(new Frame[0])); diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorOnOffTest.java @@ -36,12 +36,13 @@ public class HeapMonitorOnOffTest { public static void main(String[] args) { HeapMonitor.enableSampling(); - List frameList = HeapMonitor.allocate(); + List frameList = HeapMonitor.repeatAllocate(10); + frameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorOnOffTest.java", 39)); Frame[] frames = frameList.toArray(new Frame[0]); // Check that the data is available while heap sampling is enabled. - boolean status = HeapMonitor.framesExistEverywhere(frames); + boolean status = HeapMonitor.framesExistSomewhere(frames); if (!status) { throw new RuntimeException("Failed to find the traces before the wipe out."); } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorRecentTest.java @@ -36,7 +36,7 @@ public class HeapMonitorRecentTest { private native static boolean framesNotInLiveOrRecent(Frame[] frames); - private native static boolean framesExistInLiveAndRecent(Frame[] frames); + private native static boolean framesExistInRecent(Frame[] frames); private static List runner(int max) { List frameList = null; @@ -50,14 +50,15 @@ public static void main(String[] args) { HeapMonitor.enableSampling(); // We are testing for the recent garbage sampler: - // First run for 10000 
iterations to fill up the garbage sampler. + // First run for 10 iterations to fill up the garbage sampler. List firstFrameList = runner(10); firstFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 54)); // Now because we are in a different stack frame line here, we can just re-use the same runner. - // Run for 3, we really should not see that many of these and most should be the first type. + // Run for 10, we really should no longer see the original frames and only see these. + // See the HeapMonitorFrequentTest to understand the Frequent variation. List secondFrameList = runner(10); - secondFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 59)); + secondFrameList.add(new Frame("main", "([Ljava/lang/String;)V", "HeapMonitorRecentTest.java", 60)); // We should no longer have the initial frames. boolean status = framesNotInLiveOrRecent(firstFrameList.toArray(new Frame[0])); @@ -66,7 +67,7 @@ } // We should see those new frames. 
- status = framesExistInLiveAndRecent(secondFrameList.toArray(new Frame[0])); + status = framesExistInRecent(secondFrameList.toArray(new Frame[0])); if (!status) { throw new RuntimeException("Second frame list not found in both live and recent."); } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java @@ -40,6 +40,12 @@ return error / expected * 100; } + private static void runner(int max, int depth) { + for (int j = 0; j < max; j++) { + HeapMonitor.allocate(depth); + } + } + public static void main(String[] args) { int[] depths = {10, 100, 500}; @@ -47,34 +53,38 @@ int depth = depths[depthIdx]; HeapMonitor.enableSampling(); - HeapMonitor.allocate(depth); + // Do the runner 10 times to ensure the stack is really sampled. + runner(10, depth); - // baseDepth represents the helper method depth: main, finalWrapper, and helper. + // baseDepth represents the helper method depth: main, runner, HeapMonitor.allocate, + // and HeapMonitor.actuallyAllocate. // To get the requested depth, remove this from the count. - final int baseDepth = 3; + final int baseDepth = 4; double averageDepth = getAverageStackDepth() - baseDepth; double errorPercentage = calculateErrorPercentage(depth, averageDepth); - // 1% error should be close enough. - if (errorPercentage > 1) { + // 3% error should be close enough. + if (errorPercentage > 3) { throw new RuntimeException("Stack depth average over 5% for depth " + depth + " : " + averageDepth + " , error: " + errorPercentage); } + + HeapMonitor.disableSampling(); } - HeapMonitor.disableSampling(); // Last test is 1024, which is the current maximum. 
HeapMonitor.enableSampling(); final int maximumDepth = 1024; - HeapMonitor.allocate(1024); + // Do the runner 10 times to ensure the stack is really sampled. + runner(10, maximumDepth); // Because of the extra frames, we should be at (maximumDepth + a few frames). Due to the // maximum depth allowed, we hit it and so should still be at an average of 1024. double averageDepth = getAverageStackDepth(); double errorPercentage = calculateErrorPercentage(maximumDepth, averageDepth); HeapMonitor.disableSampling(); - // 1% error should be close enough. - if (errorPercentage > 1) { + // 3% error should be close enough. + if (errorPercentage > 3) { throw new RuntimeException("Stack depth average over 5% for depth 1024 : " + averageDepth + " , error: " + errorPercentage); } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStatObjectCorrectnessTest.java @@ -34,8 +34,8 @@ /** This test is checking the object allocation path works with heap sampling. */ public class HeapMonitorStatObjectCorrectnessTest { - // Do 100000 iterations and expect maxIteration / multiplier samples. - private static final int maxIteration = 100000; + // Do 200000 iterations and expect maxIteration / multiplier samples. 
+ private static final int maxIteration = 200000; private static BigObject obj; private native static boolean statsHaveExpectedNumberSamples(int expected, int percentError); diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -490,6 +490,12 @@ } JNIEXPORT jboolean JNICALL +Java_MyPackage_HeapMonitor_framesExistSomewhere(JNIEnv *env, jclass cls, + jobjectArray frames) { + return check_or(env, frames, TRUE, TRUE, TRUE, PRINT_OUT); +} + +JNIEXPORT jboolean JNICALL Java_MyPackage_HeapMonitorRecentTest_framesNotInLiveOrRecent(JNIEnv *env, jclass cls, jobjectArray frames) { @@ -497,10 +503,10 @@ } JNIEXPORT jboolean JNICALL -Java_MyPackage_HeapMonitorRecentTest_framesExistInLiveAndRecent(JNIEnv *env, - jclass cls, - jobjectArray frames) { - return check_and(env, frames, TRUE, TRUE, FALSE, PRINT_OUT); +Java_MyPackage_HeapMonitorRecentTest_framesExistInRecent(JNIEnv *env, + jclass cls, + jobjectArray frames) { + return check_and(env, frames, FALSE, TRUE, FALSE, PRINT_OUT); } JNIEXPORT jboolean JNICALL @@ -572,6 +578,10 @@ diff_ratio = (diff_ratio < 0) ? 
-diff_ratio : diff_ratio; diff_ratio /= expected; + if (diff_ratio * 100 >= percent_error) { + fprintf(stderr, "Problem with sample count, obtained %ld and expected %d\n", + stats.sample_count, expected); + } return diff_ratio * 100 < percent_error; } @@ -583,11 +593,11 @@ return ((double) stats.sample_rate_accumulation) / stats.sample_rate_count; } -JNIEXPORT jdouble JNICALL -Java_MyPackage_HeapMonitorStackDepthTest_getAverageStackDepth(JNIEnv *env, - jclass cls) { +static double calculate_average_stack_depth( + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*)) { jvmtiStackTraces traces; - jvmtiError error = (*jvmti)->GetLiveTraces(jvmti, &traces);; + + jvmtiError error = get_traces(jvmti, &traces); if (error != JVMTI_ERROR_NONE) { return 0; @@ -607,9 +617,27 @@ sum += stack_trace->frame_count; } + if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + return 0; + } + return sum / i; } +JNIEXPORT jdouble JNICALL +Java_MyPackage_HeapMonitorStackDepthTest_getAverageStackDepth(JNIEnv *env, + jclass cls) { + double result = calculate_average_stack_depth((*jvmti)->GetLiveTraces); + + if (result != 0) { + return result; + } + + // It is possible all the live objects got collected, check the garbage traces + // in case. 
+ return calculate_average_stack_depth((*jvmti)->GetGarbageTraces); +} + typedef struct sThreadsFound { jint *threads; int num_threads; # HG changeset patch # User jcbeyler # Date 1518494627 28800 # Mon Feb 12 20:03:47 2018 -0800 # Node ID 7725de9aba3d712a1cb42b16209afd8f19e437e4 # Parent 31a5297861eff27f053157bf32fdd04bbee57dd7 [mq]: heap23 diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -4076,7 +4076,7 @@ } else { lea(end, Address(obj, var_size_in_bytes)); } - ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset())); + ldr(rscratch1, Address(rthread, JavaThread::tlab_current_end_offset())); cmp(end, rscratch1); br(Assembler::HI, slow_case); @@ -4106,7 +4106,7 @@ } ldr(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); - ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); + ldr(t1, Address(rthread, in_bytes(JavaThread::tlab_current_end_offset()))); // calculate amount of free space sub(t1, t1, top); @@ -4200,7 +4200,7 @@ str(top, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); add(top, top, t1); sub(top, top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - str(top, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); + str(top, Address(rthread, in_bytes(JavaThread::tlab_current_end_offset()))); if (ZeroTLAB) { // This is a fast TLAB refill, therefore the GC is not notified of it. 
@@ -4347,7 +4347,7 @@ should_not_reach_here(); bind(next); - ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); + ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_current_end_offset()))); ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); cmp(rscratch2, rscratch1); br(Assembler::HS, ok); diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp --- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp +++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp @@ -1309,7 +1309,7 @@ assert_different_registers(obj, obj_end, tlab_end); ldr(obj, Address(Rthread, JavaThread::tlab_top_offset())); - ldr(tlab_end, Address(Rthread, JavaThread::tlab_end_offset())); + ldr(tlab_end, Address(Rthread, JavaThread::tlab_current_end_offset())); add_rc(obj_end, obj, size_expression); cmp(obj_end, tlab_end); b(slow_case, hi); @@ -1327,7 +1327,7 @@ InlinedAddress intArrayKlass_addr((address)Universe::intArrayKlassObj_addr()); Label discard_tlab, do_refill; ldr(top, Address(Rthread, JavaThread::tlab_top_offset())); - ldr(tmp1, Address(Rthread, JavaThread::tlab_end_offset())); + ldr(tmp1, Address(Rthread, JavaThread::tlab_current_end_offset())); ldr(tmp2, Address(Rthread, JavaThread::tlab_refill_waste_limit_offset())); // Calculate amount of free space @@ -1397,7 +1397,7 @@ #endif sub(tmp1, tmp1, ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - str(tmp1, Address(Rthread, JavaThread::tlab_end_offset())); + str(tmp1, Address(Rthread, JavaThread::tlab_current_end_offset())); if (ZeroTLAB) { // clobbers start and tmp diff --git a/src/hotspot/cpu/arm/templateTable_arm.cpp b/src/hotspot/cpu/arm/templateTable_arm.cpp --- a/src/hotspot/cpu/arm/templateTable_arm.cpp +++ b/src/hotspot/cpu/arm/templateTable_arm.cpp @@ -4408,7 +4408,7 @@ assert_different_registers(Robj, Rsize, Rklass, Rtlab_top, Rtlab_end); __ ldr(Robj, Address(Rthread, JavaThread::tlab_top_offset())); - __ ldr(Rtlab_end, Address(Rthread, 
in_bytes(JavaThread::tlab_end_offset()))); + __ ldr(Rtlab_end, Address(Rthread, in_bytes(JavaThread::tlab_current_end_offset()))); __ add(Rtlab_top, Robj, Rsize); __ cmp(Rtlab_top, Rtlab_end); __ b(slow_case, hi); diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp --- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp +++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp @@ -2309,7 +2309,7 @@ //verify_tlab(); not implemented ld(obj, in_bytes(JavaThread::tlab_top_offset()), R16_thread); - ld(R0, in_bytes(JavaThread::tlab_end_offset()), R16_thread); + ld(R0, in_bytes(JavaThread::tlab_current_end_offset()), R16_thread); if (var_size_in_bytes == noreg) { addi(new_top, obj, con_size_in_bytes); } else { diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp @@ -3692,7 +3692,7 @@ // Check if we can allocate in the TLAB. __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread); - __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread); + __ ld(RendValue, in_bytes(JavaThread::tlab_current_end_offset()), R16_thread); __ add(RnewTopValue, Rinstance_size, RoldTopValue); diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -2783,7 +2783,7 @@ } else { z_lay(end, Address(obj, var_size_in_bytes)); } - z_cg(end, Address(thread, JavaThread::tlab_end_offset())); + z_cg(end, Address(thread, JavaThread::tlab_current_end_offset())); branch_optimized(bcondHigh, slow_case); // Update the tlab top pointer. 
diff --git a/src/hotspot/cpu/s390/templateTable_s390.cpp b/src/hotspot/cpu/s390/templateTable_s390.cpp --- a/src/hotspot/cpu/s390/templateTable_s390.cpp +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp @@ -3768,7 +3768,7 @@ Register RnewTopValue = tmp; __ z_lg(RoldTopValue, Address(Z_thread, JavaThread::tlab_top_offset())); __ load_address(RnewTopValue, Address(RoldTopValue, Rsize)); - __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_end_offset())); + __ z_cg(RnewTopValue, Address(Z_thread, JavaThread::tlab_current_end_offset())); __ z_brh(slow_case); __ z_stg(RnewTopValue, Address(Z_thread, JavaThread::tlab_top_offset())); diff --git a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp +++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp @@ -3074,7 +3074,7 @@ bind(next); ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), t2); or3(t3, t2, t3); cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); STOP("assert(top <= end)"); @@ -3196,7 +3196,7 @@ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj); // calculate amount of free space - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free); + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), free); sub(free, obj, free); Label done; @@ -3248,7 +3248,7 @@ } ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); - ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1); + ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), t1); ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2); // calculate amount of free space @@ -3340,7 +3340,7 @@ #endif // ASSERT add(top, t1, top); // t1 is tlab_size sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); - 
st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); + st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_current_end_offset())); if (ZeroTLAB) { // This is a fast TLAB refill, therefore the GC is not notified of it. diff --git a/src/hotspot/cpu/sparc/templateTable_sparc.cpp b/src/hotspot/cpu/sparc/templateTable_sparc.cpp --- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp +++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp @@ -3296,7 +3296,7 @@ // check if we can allocate in the TLAB __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RalocatedObject - __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue); + __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_current_end_offset()), RendValue); __ add(RoldTopValue, Roffset, RnewTopValue); // if there is enough space, we do not CAS and do not clear diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp @@ -5585,7 +5585,7 @@ } else { lea(end, Address(obj, var_size_in_bytes, Address::times_1)); } - cmpptr(end, Address(thread, JavaThread::tlab_end_offset())); + cmpptr(end, Address(thread, JavaThread::tlab_current_end_offset())); jcc(Assembler::above, slow_case); // update the tlab top pointer @@ -5617,7 +5617,7 @@ NOT_LP64(get_thread(thread_reg)); movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_current_end_offset()))); // calculate amount of free space subptr(t1, top); @@ -5698,7 +5698,7 @@ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top); addptr(top, t1); subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes()); - movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top); + movptr(Address(thread_reg, 
in_bytes(JavaThread::tlab_current_end_offset())), top); if (ZeroTLAB) { // This is a fast TLAB refill, therefore the GC is not notified of it. @@ -6259,7 +6259,7 @@ should_not_reach_here(); bind(next); - movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); + movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_current_end_offset()))); cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); jcc(Assembler::aboveEqual, ok); STOP("assert(top <= end)"); diff --git a/src/hotspot/cpu/x86/templateTable_x86.cpp b/src/hotspot/cpu/x86/templateTable_x86.cpp --- a/src/hotspot/cpu/x86/templateTable_x86.cpp +++ b/src/hotspot/cpu/x86/templateTable_x86.cpp @@ -3903,7 +3903,7 @@ if (UseTLAB) { __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset()))); __ lea(rbx, Address(rax, rdx, Address::times_1)); - __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset()))); + __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_current_end_offset()))); __ jcc(Assembler::above, slow_case); __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx); if (ZeroTLAB) { diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -302,7 +302,7 @@ } HeapWord* CollectedHeap::allocate_sampled_object(Thread* thread, size_t size) { - thread->tlab().set_back_actual_end(); + thread->tlab().set_back_allocation_end(); // The tlab could still have space after this sample. return thread->tlab().allocate(size); @@ -315,7 +315,7 @@ HeapWord* obj = NULL; if (should_sample) { // Remember the tlab end to fix up the sampling rate. - HeapWord* tlab_old_end = thread->tlab().end(); + HeapWord* tlab_old_end = thread->tlab().current_end(); obj = allocate_sampled_object(thread, size); // If we did allocate in this tlab, sample it. 
Otherwise, we wait for the diff --git a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.inline.hpp +++ b/src/hotspot/share/gc/shared/collectedHeap.inline.hpp @@ -33,6 +33,7 @@ #include "oops/arrayOop.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" +#include "runtime/heapMonitoring.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/thread.inline.hpp" #include "services/lowMemoryDetector.hpp" @@ -154,17 +155,17 @@ check_for_non_bad_heap_word_value(result, size)); assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage"); - THREAD->incr_allocated_bytes(size * HeapWordSize); + int size_in_bytes = size * HeapWordSize; + THREAD->incr_allocated_bytes(size_in_bytes); - AllocTracer::send_allocation_outside_tlab(klass, result, size * HeapWordSize, THREAD); + AllocTracer::send_allocation_outside_tlab(klass, result, size_in_bytes, THREAD); if (UseTLAB) { - THREAD->tlab().handle_sample(THREAD, result, size); + THREAD->tlab().handle_sample(THREAD, result, size_in_bytes); } return result; } - if (!gc_overhead_limit_was_exceeded) { // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support report_java_out_of_memory("Java heap space"); diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp @@ -47,6 +47,16 @@ make_parsable(true); // also retire the TLAB } +size_t ThreadLocalAllocBuffer::remaining() { + if (current_end() == NULL) { + return 0; + } + + // TODO: To be deprecated when FastTLABRefill is deprecated. 
+ update_end_pointers(); + return pointer_delta(reserved_end(), top()); +} + void ThreadLocalAllocBuffer::accumulate_statistics_before_gc() { global_stats()->initialize(); @@ -109,27 +119,29 @@ // Waste accounting should be done in caller as appropriate; see, // for example, clear_before_allocation(). void ThreadLocalAllocBuffer::make_parsable(bool retire, bool zap) { - if (end() != NULL) { + if (current_end() != NULL) { invariants(); if (retire) { myThread()->incr_allocated_bytes(used_bytes()); } - CollectedHeap::fill_with_object(top(), hard_end(), retire && zap); + // TODO: To be deprecated when FastTLABRefill is deprecated. + update_end_pointers(); + CollectedHeap::fill_with_object(top(), reserved_end(), retire && zap); if (retire || ZeroTLAB) { // "Reset" the TLAB set_start(NULL); set_top(NULL); set_pf_top(NULL); - set_end(NULL); - set_actual_end(NULL); - set_slow_path_end(NULL); + set_current_end(NULL); + set_allocation_end(NULL); + set_last_slow_path_end(NULL); } } assert(!(retire || ZeroTLAB) || - (start() == NULL && end() == NULL && top() == NULL && - _actual_end == NULL && _slow_path_end == NULL), + (start() == NULL && current_end() == NULL && top() == NULL && + _allocation_end == NULL && _last_slow_path_end == NULL), "TLAB must be reset"); } @@ -200,9 +212,9 @@ set_start(start); set_top(top); set_pf_top(top); - set_end(end); - set_actual_end(end); - set_slow_path_end(end); + set_current_end(end); + set_allocation_end(end); + set_last_slow_path_end(end); invariants(); _bytes_until_sample = 0; } @@ -327,14 +339,14 @@ } void ThreadLocalAllocBuffer::set_sample_end() { - size_t heap_words_remaining = pointer_delta(_end, _top); + size_t heap_words_remaining = pointer_delta(_current_end, _top); size_t bytes_left = _bytes_until_sample; size_t words_until_sample = bytes_left / HeapWordSize; if (heap_words_remaining > words_until_sample) { HeapWord* new_end = _top + words_until_sample; - set_end(new_end); - set_slow_path_end(new_end); + set_current_end(new_end); + 
set_last_slow_path_end(new_end); set_bytes_until_sample(0); } else { bytes_left -= heap_words_remaining * HeapWordSize; @@ -361,11 +373,12 @@ set_sample_end(); log_trace(gc, tlab)("TLAB picked next sample: thread: " INTPTR_FORMAT " [id: %2d]" - " start: " INTPTR_FORMAT " top: " INTPTR_FORMAT " end: " INTPTR_FORMAT " actual_end:" - INTPTR_FORMAT " slow_path_end: " INTPTR_FORMAT, + " start: " INTPTR_FORMAT " top: " INTPTR_FORMAT " end: " + INTPTR_FORMAT " allocation_end:" + INTPTR_FORMAT " last_slow_path_end: " INTPTR_FORMAT, p2i(myThread()), myThread()->osthread()->thread_id(), - p2i(start()), p2i(top()), p2i(end()), - p2i(_actual_end), p2i(_slow_path_end)); + p2i(start()), p2i(top()), p2i(current_end()), + p2i(_allocation_end), p2i(_last_slow_path_end)); } Thread* ThreadLocalAllocBuffer::myThread() { @@ -374,51 +387,58 @@ in_bytes(Thread::tlab_start_offset())); } -void ThreadLocalAllocBuffer::set_back_actual_end() { +void ThreadLocalAllocBuffer::set_back_allocation_end() { // Did a fast TLAB refill occur? - if (_slow_path_end != _end) { + if (_last_slow_path_end != _current_end) { // Fix up the actual end to be now the end of this TLAB. - _slow_path_end = _end; - _actual_end = _end; + _last_slow_path_end = _current_end; + _allocation_end = _current_end; } else { - _end = _actual_end; + _current_end = _allocation_end; } } void ThreadLocalAllocBuffer::handle_sample(Thread* thread, HeapWord* result, - size_t size) { + size_t size_in_bytes) { if (!HeapMonitoring::enabled()) { return; } - size_t size_in_bytes = size * HeapWordSize; - if (_bytes_until_sample > size_in_bytes) { - set_bytes_until_sample(_bytes_until_sample - size_in_bytes); - } else { - // Technically this is not exactly right, we probably should remember how many bytes are - // negative probably to then reduce our next sample size. - set_bytes_until_sample(0); - } - - // Should we sample now? 
- if (should_sample()) { + if (_bytes_until_sample < size_in_bytes) { HeapMonitoring::object_alloc_do_sample(thread, reinterpret_cast(result), size_in_bytes); - set_back_actual_end(); - pick_next_sample(); + } + + update_tlab_sample_point(size_in_bytes); +} + +void ThreadLocalAllocBuffer::update_tlab_sample_point(size_t size_in_bytes) { + if (_bytes_until_sample > size_in_bytes) { + _bytes_until_sample -= size_in_bytes; + return; + } + + // We sampled here, so reset it all and start a new sample point. + set_bytes_until_sample(0); + set_back_allocation_end(); + pick_next_sample(); +} + +void ThreadLocalAllocBuffer::update_end_pointers() { + // Did a fast TLAB refill occur? (This will be deprecated when fast TLAB + // refill disappears). + if (_last_slow_path_end != _current_end) { + // Fix up the last slow path end to be now the end of this TLAB. + _last_slow_path_end = _current_end; + _allocation_end = _current_end; } } -HeapWord* ThreadLocalAllocBuffer::hard_end() { - // Did a fast TLAB refill occur? - if (_slow_path_end != _end) { - // Fix up the actual end to be now the end of this TLAB. 
- _slow_path_end = _end; - _actual_end = _end; - } - - return _actual_end + alignment_reserve(); +HeapWord* ThreadLocalAllocBuffer::reserved_end() { + assert (_last_slow_path_end == _current_end, + "Have to call update_end_pointers before reserved_end."); + return _allocation_end + alignment_reserve(); } GlobalTLABStats::GlobalTLABStats() : diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.hpp @@ -51,10 +51,9 @@ HeapWord* _start; // address of TLAB HeapWord* _top; // address after last allocation HeapWord* _pf_top; // allocation prefetch watermark - HeapWord* _end; // allocation end (can be the sampling end point or - // the actual TLAB end, excluding alignment_reserve) - HeapWord* _actual_end; // allocation actual_end (actual TLAB end, excluding alignment_reserve) - HeapWord* _slow_path_end; // remember the end in case a fast refill occurs. 
+ HeapWord* _current_end; // allocation end (can be the sampling end point or _allocation_end) + HeapWord* _allocation_end; // end for allocations (actual TLAB end, excluding alignment_reserve) + HeapWord* _last_slow_path_end; // last address for slow_path_end (as opposed to _allocation_end) size_t _desired_size; // desired size (including alignment_reserve) size_t _refill_waste_limit; // hold onto tlab if free() is larger than this @@ -77,9 +76,9 @@ void initialize_statistics(); void set_start(HeapWord* start) { _start = start; } - void set_end(HeapWord* end) { _end = end; } - void set_actual_end(HeapWord* actual_end) { _actual_end = actual_end; } - void set_slow_path_end(HeapWord* slow_path_end) { _slow_path_end = slow_path_end; } + void set_current_end(HeapWord* current_end) { _current_end = current_end; } + void set_allocation_end(HeapWord* ptr) { _allocation_end = ptr; } + void set_last_slow_path_end(HeapWord* ptr) { _last_slow_path_end = ptr; } void set_top(HeapWord* top) { _top = top; } void set_pf_top(HeapWord* pf_top) { _pf_top = pf_top; } void set_desired_size(size_t desired_size) { _desired_size = desired_size; } @@ -91,9 +90,10 @@ static int target_refills() { return _target_refills; } size_t initial_desired_size(); - size_t remaining() { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); } + size_t remaining(); void set_sample_end(); + void update_end_pointers(); // Make parsable and release it. void reset(); @@ -101,7 +101,7 @@ // Resize based on amount of allocation, etc. 
void resize(); - void invariants() const { assert(top() >= start() && top() <= end(), "invalid tlab"); } + void invariants() const { assert(top() >= start() && top() <= current_end(), "invalid tlab"); } void initialize(HeapWord* start, HeapWord* top, HeapWord* end); @@ -131,14 +131,14 @@ static void set_max_size(size_t max_size) { _max_size = max_size; } HeapWord* start() const { return _start; } - HeapWord* end() const { return _end; } + HeapWord* current_end() const { return _current_end; } HeapWord* top() const { return _top; } - HeapWord* hard_end(); + HeapWord* reserved_end(); HeapWord* pf_top() const { return _pf_top; } size_t desired_size() const { return _desired_size; } size_t used() const { return pointer_delta(top(), start()); } size_t used_bytes() const { return pointer_delta(top(), start(), 1); } - size_t free() const { return pointer_delta(end(), top()); } + size_t free() const { return pointer_delta(current_end(), top()); } // Don't discard tlab if remaining space is larger than this. 
size_t refill_waste_limit() const { return _refill_waste_limit; } @@ -180,15 +180,16 @@ void initialize(); void pick_next_sample(size_t diff = 0); - void set_back_actual_end(); - void handle_sample(Thread* thread, HeapWord* result, size_t size); + void set_back_allocation_end(); + void update_tlab_sample_point(size_t size_in_bytes); + void handle_sample(Thread* thread, HeapWord* result, size_t size_in_bytes); bool should_sample() { return _bytes_until_sample == 0; } static size_t refill_waste_limit_increment() { return TLABWasteIncrement; } // Code generation support static ByteSize start_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _start); } - static ByteSize end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _end ); } + static ByteSize current_end_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _current_end ); } static ByteSize top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _top ); } static ByteSize pf_top_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top ); } static ByteSize size_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); } diff --git a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp --- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.inline.hpp @@ -34,7 +34,7 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) { invariants(); HeapWord* obj = top(); - if (pointer_delta(end(), obj) >= size) { + if (pointer_delta(current_end(), obj) >= size) { // successful thread-local allocation #ifdef ASSERT // Skip mangling the space corresponding to the object header to diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp --- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp +++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp @@ -321,7 +321,7 @@ \ 
nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \ - nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \ + nonstatic_field(ThreadLocalAllocBuffer, _current_end, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _desired_size, size_t) \ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \ diff --git a/src/hotspot/share/opto/macro.cpp b/src/hotspot/share/opto/macro.cpp --- a/src/hotspot/share/opto/macro.cpp +++ b/src/hotspot/share/opto/macro.cpp @@ -1241,9 +1241,9 @@ if (UseTLAB) { // Private allocation: load from TLS Node* thread = transform_later(new ThreadLocalNode()); int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset()); - int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset()); + int tlab_current_end_offset = in_bytes(JavaThread::tlab_current_end_offset()); eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset); - eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset); + eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_current_end_offset); } else { // Shared allocation: load from globals CollectedHeap* ch = Universe::heap(); address top_adr = (address)ch->top_addr(); diff --git a/src/hotspot/share/prims/jvmti.xml b/src/hotspot/share/prims/jvmti.xml --- a/src/hotspot/share/prims/jvmti.xml +++ b/src/hotspot/share/prims/jvmti.xml @@ -11539,42 +11539,20 @@ - - - - jvmtiFrameInfo + + + + jvmtiStackInfo - Pointer to the call frames. - - - - The number of frames for the trace. + Pointer to the stack information. - + The size of the object allocation. - - The thread id number. - - - - - - - jvmtiStackTrace - - - The array with the various stack traces. - - - - - - Number of traces pointed by the array . - + The thread id of the object allocation. @@ -11627,7 +11605,7 @@ rate requested and will fill internal data structures with heap allocation samples. 
The samples are obtained via the , , , - or functions. + or functions. new @@ -11664,12 +11642,12 @@ Stop the heap sampler in the JVM. Any sample obtained during sampling is still available via the , , , - or functions. + or functions. Stopping the heap sampler resets internal traces and counters. Therefore stopping the sampler frees any internal trace samples, any subsequent call to the , , , - or functions will return no traces. + or functions will return no traces. new @@ -11681,15 +11659,15 @@ - - Get Live Traces + + Get Object Allocation Traces Get Live Heap Sampled traces. The fields of the structure are filled in with details of the specified sampled allocation. This methods call full GC and can be costly. Use with care as it can affect performance. For - continuous profiling, perhaps prefer GetCachedTraces, which returns the live traces at the last - full GC point. + continuous profiling, perhaps prefer GetCachedObjectAllocTraces, which returns the live + traces at the last full GC point. This method can be called at any time but if the sampler is not enabled, via , it returns no traces. @@ -11699,10 +11677,27 @@ - - jvmtiStackTraces - - The stack trace data structure to be filled. + + + jvmtiAllocTraceInfo + + + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. + + + + + + The number of traces allocated. @@ -11724,10 +11719,27 @@ - - jvmtiStackTraces - - The stack trace data structure to be filled. + + + jvmtiAllocTraceInfo + + + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. + + + + + + The number of traces allocated. @@ -11749,19 +11761,36 @@ - - jvmtiStackTraces - - The stack trace data structure to be filled. + + + jvmtiAllocTraceInfo + + + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. - - - - - - - Get Live Traces + + + + The number of traces allocated. + + + + + + + + + Get Cached Object Allocated Traces Get the cached sampled traces: the traces are the ones that were collected during the last full GC. The fields of the structure are filled in with @@ -11775,31 +11804,27 @@ - - jvmtiStackTraces - - The stack trace data structure to be filled. + + + jvmtiAllocTraceInfo + + + On return, this buffer is filled with stack information for each live object. + The number of records is determined + by . +

+ Note that this buffer is allocated to include the + buffers pointed to by , which also + include the buffers pointed by + . + + All these buffers must not be separately deallocated. - - - - - - - Release traces provided by the heap monitoring - - Release traces provided by any of the trace retrieval methods. - - new - - - - - - jvmtiStackTraces - - The stack trace data structure to be released. + + + + The number of traces allocated. @@ -11807,7 +11832,7 @@ - + Get the heap sampling statistics Returns a to understand the heap sampling behavior and current diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -2028,62 +2028,56 @@ // Provoke a GC and get the currently live sampled allocations. jvmtiError -JvmtiEnv::GetLiveTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetObjectAllocTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { ForceGarbageCollection(); HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_live_traces(stack_traces); + HeapMonitoring::get_live_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; } /* end GetLiveTraces */ // Get the recently garbage collected allocations. jvmtiError -JvmtiEnv::GetGarbageTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetGarbageTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_garbage_traces(stack_traces); + HeapMonitoring::get_garbage_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; } /* end GetGarbageTraces */ // Get the frequently garbage collected traces. 
jvmtiError -JvmtiEnv::GetFrequentGarbageTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetFrequentGarbageTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_frequent_garbage_traces(stack_traces); + HeapMonitoring::get_frequent_garbage_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; } /* end GetFrequentGarbageTraces */ // Get the traces that were garbage collected in the last full GC. jvmtiError -JvmtiEnv::GetCachedTraces(jvmtiStackTraces* stack_traces) { +JvmtiEnv::GetCachedObjectAllocTraces(jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr) { HeapThreadTransition htt(Thread::current()); if (stack_traces == NULL) { return JVMTI_ERROR_ILLEGAL_ARGUMENT; } - HeapMonitoring::get_cached_traces(stack_traces); + HeapMonitoring::get_cached_traces(this, stack_traces, trace_counter_ptr); return JVMTI_ERROR_NONE; -} /* end GetCachedTraces */ - -// Release sampled traces. -jvmtiError -JvmtiEnv::ReleaseTraces(jvmtiStackTraces* stack_traces) { - if (stack_traces == NULL) { - return JVMTI_ERROR_NONE; - } - HeapMonitoring::release_traces(stack_traces); - return JVMTI_ERROR_NONE; -} /* end ReleaseTraces */ +} /* end GetObjectAllocTraces */ // Get the heap sampling statistics. jvmtiError diff --git a/src/hotspot/share/runtime/heapMonitoring.cpp b/src/hotspot/share/runtime/heapMonitoring.cpp --- a/src/hotspot/share/runtime/heapMonitoring.cpp +++ b/src/hotspot/share/runtime/heapMonitoring.cpp @@ -26,6 +26,7 @@ #include "gc/shared/collectedHeap.hpp" #include "memory/universe.hpp" +#include "prims/jvmtiEnvBase.hpp" #include "runtime/heapMonitoring.hpp" #include "runtime/orderAccess.inline.hpp" #include "runtime/vframe.hpp" @@ -35,17 +36,17 @@ // Internal data structure representing traces, used when object has been GC'd. 
class StackTraceData : public CHeapObj { private: - jvmtiStackTrace* _trace; + jvmtiAllocTraceInfo* _trace; int _references; public: - StackTraceData(jvmtiStackTrace* t) : _trace(t), _references(0) {} + StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {} void increment_reference_count() { _references++; } - jvmtiStackTrace* get_trace() const { + jvmtiAllocTraceInfo* get_trace() const { return _trace; } @@ -57,7 +58,9 @@ data->_references--; if (data->_references == 0) { if (data->_trace != NULL) { - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->_trace->frames); + jvmtiStackInfo* stack_info = data->_trace->stack_info; + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer); + FREE_C_HEAP_OBJ(stack_info); FREE_C_HEAP_OBJ(data->_trace); } delete data; @@ -73,7 +76,7 @@ oop _obj; public: - StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) { + StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) { store_oop(o); } @@ -218,23 +221,31 @@ public: // The function that gets called to add a trace to the list of // traces we are maintaining. - void add_trace(jvmtiStackTrace* trace, oop o); - - // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_all_stack_traces(jvmtiStackTraces* traces); + void add_trace(jvmtiAllocTraceInfo* trace, oop o); // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_garbage_stack_traces(jvmtiStackTraces* traces); + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_all_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. 
- void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces); + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_garbage_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); // The function that gets called by the client to retrieve the list - // of stack traces. Passes a jvmtiStackTraces which will get mutated. - void get_cached_stack_traces(jvmtiStackTraces* traces); + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_frequent_garbage_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); + + // The function that gets called by the client to retrieve the list + // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated. + void get_cached_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); // Executes whenever weak references are traversed. is_alive tells // you if the given oop is still reachable and live. @@ -271,6 +282,7 @@ bool initialized() { return OrderAccess::load_acquire(&_initialized) != 0; + return _initialized; } private: @@ -326,12 +338,11 @@ int _size; }; - // Copies from StackTraceData to jvmtiStackTrace. - bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from); - // Creates a deep copy of the list of StackTraceData. 
- void copy_stack_traces(const StackTraceDataCopier &copier, - jvmtiStackTraces* traces); + void copy_stack_traces(JvmtiEnv* env, + const StackTraceDataCopier &copier, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr); void store_garbage_trace(const StackTraceDataWithOop &trace); @@ -340,6 +351,20 @@ void reset(); void allocate_storage(int max_gc_storage); + + int calculate_frame_count(const StackTraceDataCopier &copier); + int calculate_info_count(const StackTraceDataCopier &copier); + + bool copy_frame(const StackTraceData* stack_trace_data, + jvmtiAllocTraceInfo* current_alloc_traces, + jvmtiStackInfo* current_stack_info, + jvmtiFrameInfo* current_frame_info); + + // Returns frame copy success. Failure can result when there is no longer + // enough memory. + bool copy_frames(const StackTraceDataCopier& copier, int info_count, + unsigned char* start, + unsigned char* end); }; StackTraceStorage* StackTraceStorage::internal_storage; @@ -353,10 +378,15 @@ uint64_t HeapMonitoring::_rnd; StackTraceStorage::StackTraceStorage() { + MutexLocker mu(HeapMonitorStorage_lock); reset(); } void StackTraceStorage::reset() { + assert(HeapMonitorStorage_lock->owned_by_self() + || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), + "This should not be accessed concurrently"); + _allocated_traces = NULL; _traces_on_last_full_gc = NULL; _recent_garbage_traces = NULL; @@ -415,6 +445,10 @@ } void StackTraceStorage::allocate_storage(int max_gc_storage) { + assert(HeapMonitorStorage_lock->owned_by_self() + || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), + "This should not be accessed concurrently"); + // In case multiple threads got locked and then 1 by 1 got through. 
if (initialized()) { return; @@ -433,7 +467,7 @@ OrderAccess::release_store(&_initialized, 1); } -void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) { +void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) { MutexLocker mu(HeapMonitorStorage_lock); // Last minute check on initialization here in case: // Between the moment object_alloc_do_sample's check for initialization @@ -441,7 +475,7 @@ if (initialized()) { StackTraceDataWithOop new_data(trace, o); _stats.sample_count++; - _stats.stack_depth_accumulation += trace->frame_count; + _stats.stack_depth_accumulation += trace->stack_info->frame_count; _allocated_traces->append(new_data); } } @@ -491,112 +525,201 @@ log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count); } -bool StackTraceStorage::deep_copy(jvmtiStackTrace* to, - const StackTraceData* from) { - const jvmtiStackTrace* src = from->get_trace(); - *to = *src; - - to->frames = - NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal); - - if (to->frames == NULL) { - return false; - } - - memcpy(to->frames, - src->frames, - sizeof(jvmtiFrameInfo) * src->frame_count); - return true; -} - // Called by the outside world; returns a copy of the stack traces // (because we could be replacing them as the user handles them). // The array is secretly null-terminated (to make it easier to reclaim). 
-void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) { +void StackTraceStorage::get_all_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { MutexLocker mu(HeapMonitorStorage_lock); if (!_allocated_traces) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } LiveStackTraceDataCopier copier(_allocated_traces); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } // See comment on get_all_stack_traces -void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) { +void StackTraceStorage::get_garbage_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { MutexLocker mu(HeapMonitorStorage_lock); if (!_recent_garbage_traces) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(), _recent_garbage_traces->size()); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } // See comment on get_all_stack_traces void StackTraceStorage::get_frequent_garbage_stack_traces( - jvmtiStackTraces* traces) { + JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) { MutexLocker mu(HeapMonitorStorage_lock); if (!_frequent_garbage_traces) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(), _frequent_garbage_traces->size()); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } // See comment on get_all_stack_traces -void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) { +void StackTraceStorage::get_cached_stack_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { 
MutexLocker mu(HeapMonitorStorage_lock); if (!_traces_on_last_full_gc) { - traces->stack_traces = NULL; - traces->trace_count = 0; + *traces = NULL; + *trace_counter_ptr = 0; return; } LiveStackTraceDataCopier copier(_traces_on_last_full_gc); - copy_stack_traces(copier, traces); + copy_stack_traces(env, copier, traces, trace_counter_ptr); } -void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, - jvmtiStackTraces* traces) { +int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) { int len = copier.size(); - // Create a new array to store the StackTraceData objects. - // + 1 for a NULL at the end. - jvmtiStackTrace* t = - NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal); - if (t == NULL) { - traces->stack_traces = NULL; - traces->trace_count = 0; - return; - } - // +1 to have a NULL at the end of the array. - memset(t, 0, (len + 1) * sizeof(*t)); + // Walk the traces first to find the size of the frames as well. + int frame_total = 0; - // Copy the StackTraceData objects into the new array. - int trace_count = 0; for (int i = 0; i < len; i++) { const StackTraceData* stack_trace = copier.get(i); + if (stack_trace != NULL) { - jvmtiStackTrace* to = &t[trace_count]; - if (!deep_copy(to, stack_trace)) { - continue; - } - trace_count++; + jvmtiAllocTraceInfo* trace = stack_trace->get_trace(); + jvmtiStackInfo* stack_info = trace->stack_info; + frame_total += stack_info->frame_count; } } - traces->stack_traces = t; - traces->trace_count = trace_count; + return frame_total; +} + +int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) { + int len = copier.size(); + + int info_total = 0; + + for (int i = 0; i < len; i++) { + const StackTraceData* stack_trace = copier.get(i); + + if (stack_trace != NULL) { + // TODO: merge this with the method above. 
+ info_total++; + } + } + + return info_total; +} + +// Method to test if the data structure would fit between the src address and +// the end address. +template +static bool next_ptr_less_or_equal(T src, U* end) { + return (src + 1) <= reinterpret_cast(end); +} + +bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data, + jvmtiAllocTraceInfo* current_alloc_trace, + jvmtiStackInfo* current_stack_info, + jvmtiFrameInfo* current_frame_info) { + jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace(); + jvmtiStackInfo* stack_info = trace->stack_info; + int frame_count = stack_info->frame_count; + + memcpy(current_alloc_trace, trace, sizeof(*trace)); + + current_alloc_trace->stack_info = current_stack_info; + memcpy(current_stack_info, stack_info, sizeof(*stack_info)); + + current_stack_info->frame_buffer = current_frame_info; + memcpy(current_frame_info, stack_info->frame_buffer, + sizeof(jvmtiFrameInfo) * frame_count); + return true; +} + +bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier, + int info_count, + unsigned char* start, + unsigned char* end) { + jvmtiAllocTraceInfo* start_alloc_trace = reinterpret_cast(start); + jvmtiStackInfo* start_stack_info = reinterpret_cast(start_alloc_trace + info_count); + jvmtiFrameInfo* start_frame_info = reinterpret_cast(start_stack_info + info_count); + + jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace; + jvmtiStackInfo* current_stack_info = start_stack_info; + jvmtiFrameInfo* current_frame_info = start_frame_info; + + for (int i = 0; i < info_count; i++) { + assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info), + "jvmtiAllocTraceInfo would write over jvmtiStackInfos."); + assert(next_ptr_less_or_equal(current_stack_info, start_frame_info), + "jvmtiStackInfo would write over jvmtiFrameInfos."); + + assert(next_ptr_less_or_equal(current_frame_info, end), + "jvmtiFrameInfo would write over the end of the buffer."); + + const StackTraceData* stack_trace_data = 
copier.get(i); + if (stack_trace_data != NULL) { + if (!copy_frame(stack_trace_data, current_alloc_trace, + current_stack_info, current_frame_info)) { + return false; + } + + current_frame_info += current_stack_info->frame_count; + current_stack_info++; + current_alloc_trace++; + } + } + + return true; +} + +void StackTraceStorage::copy_stack_traces(JvmtiEnv* env, + const StackTraceDataCopier& copier, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + *traces = NULL; + *trace_counter_ptr = 0; + + int frame_total = calculate_frame_count(copier); + int len = calculate_info_count(copier); + + // Allocate the whole stacktraces in one bloc to simplify freeing. + size_t total_size = len * sizeof(jvmtiAllocTraceInfo) + + len * sizeof(jvmtiStackInfo) + + frame_total * sizeof(jvmtiFrameInfo); + + unsigned char* buffer = NULL; + jvmtiAllocTraceInfo* result = NULL; + JvmtiEnvBase* env_base = reinterpret_cast(env); + env_base->allocate(total_size, &buffer); + + if (buffer == NULL) { + return; + } + + bool success = copy_frames(copier, len, buffer, buffer + total_size); + + if (!success) { + env_base->deallocate(buffer); + return; + } + + *trace_counter_ptr = len; + *traces = reinterpret_cast(buffer); } void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) { @@ -615,8 +738,12 @@ _stats.garbage_collected_samples++; } -void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_all_stack_traces(traces); +void HeapMonitoring::get_live_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_all_stack_traces(env, + traces, + trace_counter_ptr); } void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) { @@ -625,30 +752,27 @@ *stats = internal_stats; } -void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces); -} - -void 
HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_garbage_stack_traces(traces); -} - -void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) { - StackTraceStorage::storage()->get_cached_stack_traces(traces); +void HeapMonitoring::get_frequent_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_frequent_garbage_stack_traces( + env, traces, trace_counter_ptr); } -void HeapMonitoring::release_traces(jvmtiStackTraces* traces) { - jint trace_count = traces->trace_count; - jvmtiStackTrace* stack_traces = traces->stack_traces; +void HeapMonitoring::get_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_garbage_stack_traces(env, + traces, + trace_counter_ptr); +} - for (jint i = 0; i < trace_count; i++) { - jvmtiStackTrace* current_trace = stack_traces + i; - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames); - } - - FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces); - traces->trace_count = 0; - traces->stack_traces = NULL; +void HeapMonitoring::get_cached_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** traces, + jint* trace_counter_ptr) { + StackTraceStorage::storage()->get_cached_stack_traces(env, + traces, + trace_counter_ptr); } // Invoked by the GC to clean up old stack traces and remove old arrays @@ -731,29 +855,37 @@ StackTraceStorage::storage()->accumulate_sample_rate(rate); } -void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) { +void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) { JavaThread* thread = static_cast(t); if (StackTraceStorage::storage()->initialized()) { assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); JavaThread* thread = static_cast(t); - jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal); + jvmtiAllocTraceInfo* trace = 
NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal); if (trace == NULL) { return; } + jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal); + if (stack_info == NULL) { + FREE_C_HEAP_OBJ(trace); + return; + } + trace->stack_info = stack_info; + jvmtiFrameInfo* frames = NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal); if (frames == NULL) { + FREE_C_HEAP_OBJ(stack_info); FREE_C_HEAP_OBJ(trace); return; } + stack_info->frame_buffer = frames; + stack_info->frame_count = 0; - trace->frames = frames; trace->thread_id = SharedRuntime::get_java_tid(thread); trace->size = byte_size; - trace->frame_count = 0; if (thread->has_last_Java_frame()) { // just to be safe vframeStream vfst(thread, true); @@ -766,17 +898,18 @@ vfst.next(); } - trace->frame_count = count; + stack_info->frame_count = count; } - if (trace->frame_count> 0) { + if (stack_info->frame_count > 0) { // Success! StackTraceStorage::storage()->add_trace(trace, o); return; } // Failure! - FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames); + FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames); + FREE_C_HEAP_OBJ(stack_info); FREE_C_HEAP_OBJ(trace); } } diff --git a/src/hotspot/share/runtime/heapMonitoring.hpp b/src/hotspot/share/runtime/heapMonitoring.hpp --- a/src/hotspot/share/runtime/heapMonitoring.hpp +++ b/src/hotspot/share/runtime/heapMonitoring.hpp @@ -78,18 +78,25 @@ // initialize_profiling method. static void pick_next_sample(size_t* ptr); - // Get live/garbage traces and provide a method to release the traces. - static void get_live_traces(jvmtiStackTraces* stack_traces); - static void get_garbage_traces(jvmtiStackTraces* stack_traces); - static void get_frequent_garbage_traces(jvmtiStackTraces* stack_traces); - static void get_cached_traces(jvmtiStackTraces* stack_traces); - static void release_traces(jvmtiStackTraces* trace_info); + // Get live/cached/garbage traces. 
+ static void get_live_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); + static void get_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); + static void get_frequent_garbage_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); + static void get_cached_traces(JvmtiEnv* env, + jvmtiAllocTraceInfo** stack_traces, + jint* trace_counter_ptr); static void get_sampling_statistics(jvmtiHeapSamplingStats* stats); static void stop_profiling(); // Called when o is to be sampled from a given thread and a given size. - static void object_alloc_do_sample(Thread* t, oopDesc* o, intx size_in_bytes); + static void object_alloc_do_sample(Thread* t, oopDesc* o, size_t size_in_bytes); // Called to clean up oops that have been saved by our sampling function, // but which no longer have other references in the heap. diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp --- a/src/hotspot/share/runtime/thread.hpp +++ b/src/hotspot/share/runtime/thread.hpp @@ -678,7 +678,7 @@ static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); } TLAB_FIELD_OFFSET(start) - TLAB_FIELD_OFFSET(end) + TLAB_FIELD_OFFSET(current_end) TLAB_FIELD_OFFSET(top) TLAB_FIELD_OFFSET(pf_top) TLAB_FIELD_OFFSET(size) // desired_size diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp --- a/src/hotspot/share/runtime/vmStructs.cpp +++ b/src/hotspot/share/runtime/vmStructs.cpp @@ -533,7 +533,7 @@ \ nonstatic_field(ThreadLocalAllocBuffer, _start, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _top, HeapWord*) \ - nonstatic_field(ThreadLocalAllocBuffer, _end, HeapWord*) \ + nonstatic_field(ThreadLocalAllocBuffer, _current_end, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _pf_top, HeapWord*) \ nonstatic_field(ThreadLocalAllocBuffer, _desired_size, 
size_t) \ nonstatic_field(ThreadLocalAllocBuffer, _refill_waste_limit, size_t) \ diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorStackDepthTest.java @@ -53,8 +53,8 @@ int depth = depths[depthIdx]; HeapMonitor.enableSampling(); - // Do the runner 10 times to ensure the stack is really sampled. - runner(10, depth); + // Do the runner 3 times to ensure the stack is really sampled. + runner(3, depth); // baseDepth represents the helper method depth: main, runner, HeapMonitor.allocate, // and HeapMonitor.actuallyAllocate. @@ -65,7 +65,7 @@ // 3% error should be close enough. if (errorPercentage > 3) { - throw new RuntimeException("Stack depth average over 5% for depth " + depth + " : " + averageDepth + " , error: " + errorPercentage); + throw new RuntimeException("Stack depth average over 3% for depth " + depth + " : " + averageDepth + " , error: " + errorPercentage); } HeapMonitor.disableSampling(); @@ -75,8 +75,8 @@ // Last test is 1024, which is the current maximum. HeapMonitor.enableSampling(); final int maximumDepth = 1024; - // Do the runner 10 times to ensure the stack is really sampled. - runner(10, maximumDepth); + // Do the runner 3 times to ensure the stack is really sampled. + runner(3, maximumDepth); // Because of the extra frames, we should be at (maximumDepth + a few frames). Due to the // maximum depth allowed, we hit it and so should still be at an average of 1024. double averageDepth = getAverageStackDepth(); @@ -85,7 +85,7 @@ // 3% error should be close enough. 
if (errorPercentage > 3) { - throw new RuntimeException("Stack depth average over 5% for depth 1024 : " + averageDepth + " , error: " + errorPercentage); + throw new RuntimeException("Stack depth average over 3% for depth 1024 : " + averageDepth + " , error: " + errorPercentage); } } } diff --git a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c --- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c +++ b/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/libHeapMonitor.c @@ -178,20 +178,24 @@ } ExpectedContentFrame; static jboolean check_sample_content(JNIEnv *env, - jvmtiStackTrace *trace, + jvmtiAllocTraceInfo* trace, ExpectedContentFrame *expected, int expected_count, int print_out_comparisons) { int i; - if (expected_count > trace->frame_count) { + jvmtiStackInfo* stack_info = trace->stack_info; + + if (expected_count > stack_info->frame_count) { return FALSE; } + jvmtiFrameInfo* frames = stack_info->frame_buffer; + for (i = 0; i < expected_count; i++) { // Get basic information out of the trace. 
- int bci = trace->frames[i].location; - jmethodID methodid = trace->frames[i].method; + int bci = frames[i].location; + jmethodID methodid = frames[i].method; char *name = NULL, *signature = NULL, *file_name = NULL; if (bci < 0) { @@ -228,12 +232,12 @@ } if (print_out_comparisons) { - fprintf(stderr, "Comparing:\n"); - fprintf(stderr, "\tNames: %s and %s\n", name, expected[i].name); - fprintf(stderr, "\tSignatures: %s and %s\n", signature, expected[i].signature); - fprintf(stderr, "\tFile name: %s and %s\n", file_name, expected[i].file_name); - fprintf(stderr, "\tLines: %d and %d\n", line_number, expected[i].line_number); - fprintf(stderr, "\tResult is %d\n", + fprintf(stderr, "\tComparing:\n"); + fprintf(stderr, "\t\tNames: %s and %s\n", name, expected[i].name); + fprintf(stderr, "\t\tSignatures: %s and %s\n", signature, expected[i].signature); + fprintf(stderr, "\t\tFile name: %s and %s\n", file_name, expected[i].file_name); + fprintf(stderr, "\t\tLines: %d and %d\n", line_number, expected[i].line_number); + fprintf(stderr, "\t\tResult is %d\n", (strcmp(name, expected[i].name) || strcmp(signature, expected[i].signature) || strcmp(file_name, expected[i].file_name) || @@ -251,7 +255,7 @@ return TRUE; } -static jboolean compare_samples(JNIEnv* env, jvmtiStackTrace* traces, +static jboolean compare_samples(JNIEnv* env, jvmtiAllocTraceInfo* traces, int trace_count, ExpectedContentFrame* expected_content, size_t size, @@ -259,8 +263,12 @@ // We expect the code to record correctly the bci, retrieve the line // number, have the right method and the class name of the first frames. int i; + if (print_out_comparisons) { + fprintf(stderr, "\tNumber of traces: %d\n", trace_count); + } + for (i = 0; i < trace_count; i++) { - jvmtiStackTrace *trace = traces + i; + jvmtiAllocTraceInfo* trace = traces + i; if (check_sample_content(env, trace, expected_content, size, print_out_comparisons)) { // At least one frame matched what we were looking for. 
@@ -275,18 +283,23 @@ check_samples(JNIEnv* env, ExpectedContentFrame* expected, size_t size, - jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*), + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiAllocTraceInfo**, jint*), int print_out_comparisons) { - jvmtiStackTraces traces; - jvmtiError error = get_traces(jvmti, &traces); + jvmtiAllocTraceInfo *traces; + jint trace_counter; + jvmtiError error = get_traces(jvmti, &traces, &trace_counter); if (error != JVMTI_ERROR_NONE) { return FALSE; } - int result = compare_samples(env, traces.stack_traces, traces.trace_count, + int result = compare_samples(env, traces, trace_counter, expected, size, print_out_comparisons); - (*jvmti)->ReleaseTraces(jvmti, &traces); + + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) { + return FALSE; + } + return result; } @@ -294,7 +307,7 @@ ExpectedContentFrame* expected, size_t size, int print_out_comparisons) { - return check_samples(env, expected, size, (*jvmti)->GetLiveTraces, + return check_samples(env, expected, size, (*jvmti)->GetObjectAllocTraces, print_out_comparisons); } @@ -538,28 +551,28 @@ return FALSE; } - if (check_capability_error((*jvmti)->ReleaseTraces(jvmti, NULL), - "Release Traces")) { - return FALSE; - } - if (check_capability_error((*jvmti)->GetHeapSamplingStats(jvmti, NULL), "Get Heap Sampling Stats")) { return FALSE; } - if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL), + if (check_capability_error((*jvmti)->GetGarbageTraces(jvmti, NULL, NULL), "Get Garbage Traces")) { return FALSE; } - if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL), + if (check_capability_error((*jvmti)->GetFrequentGarbageTraces(jvmti, NULL, NULL), "Get Frequent Garbage Traces")) { return FALSE; } - if (check_capability_error((*jvmti)->GetLiveTraces(jvmti, NULL), - "Get Live Traces")) { + if (check_capability_error((*jvmti)->GetObjectAllocTraces(jvmti, NULL, NULL), + "Get Object Allocated Traces")) { + return 
FALSE; + } + + if (check_capability_error((*jvmti)->GetCachedObjectAllocTraces(jvmti, NULL, NULL), + "Get Cached Object Allocated Traces")) { return FALSE; } return TRUE; @@ -594,30 +607,29 @@ } static double calculate_average_stack_depth( - jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiStackTraces*)) { - jvmtiStackTraces traces; + jvmtiError (*const get_traces)(jvmtiEnv*, jvmtiAllocTraceInfo**, jint*)) { + jvmtiAllocTraceInfo* traces = NULL; + jint trace_counter; - jvmtiError error = get_traces(jvmti, &traces);; + jvmtiError error = get_traces(jvmti, &traces, &trace_counter); if (error != JVMTI_ERROR_NONE) { return 0; } - int trace_count = traces.trace_count; - - if (trace_count == 0) { + if (trace_counter == 0) { return 0; } int i; - jvmtiStackTrace* stack_traces = traces.stack_traces; double sum = 0; - for (i = 0; i < trace_count; i++) { - jvmtiStackTrace *stack_trace = stack_traces + i; - sum += stack_trace->frame_count; + for (i = 0; i < trace_counter; i++) { + jvmtiAllocTraceInfo* trace = traces + i; + jvmtiStackInfo* stack_info = trace->stack_info; + sum += stack_info->frame_count; } - if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) { return 0; } @@ -627,7 +639,7 @@ JNIEXPORT jdouble JNICALL Java_MyPackage_HeapMonitorStackDepthTest_getAverageStackDepth(JNIEnv *env, jclass cls) { - double result = calculate_average_stack_depth((*jvmti)->GetLiveTraces); + double result = calculate_average_stack_depth((*jvmti)->GetObjectAllocTraces); if (result != 0) { return result; @@ -639,26 +651,29 @@ } typedef struct sThreadsFound { - jint *threads; + jint* threads; int num_threads; } ThreadsFound; -static void find_threads_in_traces(jvmtiStackTraces* traces, +static void find_threads_in_traces(jvmtiAllocTraceInfo* traces, + jint trace_counter, ThreadsFound* thread_data) { int i; - jvmtiStackTrace* stack_traces = traces->stack_traces; - int trace_count = traces->trace_count; - 
jint *threads = thread_data->threads; + jint* threads = thread_data->threads; int num_threads = thread_data->num_threads; // We are looking for at last expected_num_threads different traces. - for (i = 0; i < trace_count; i++) { - jvmtiStackTrace *stack_trace = stack_traces + i; - jlong thread_id = stack_trace->thread_id; + for (i = 0; i < trace_counter; i++) { + jvmtiAllocTraceInfo* trace = traces + i; + jvmtiStackInfo* stack_info = trace->stack_info; + jint thread_id = trace->thread_id; // Check it is the right frame: only accept helper top framed traces. - jmethodID methodid = stack_trace->frames[0].method; + if (stack_info->frame_count == 0) { + continue; + } + + jmethodID methodid = stack_info->frame_buffer[0].method; char *name = NULL, *signature = NULL, *file_name = NULL; (*jvmti)->GetMethodName(jvmti, methodid, &name, &signature, 0); @@ -687,30 +702,32 @@ JNIEXPORT jboolean JNICALL Java_MyPackage_HeapMonitorThreadTest_checkSamples(JNIEnv* env, jclass cls, jintArray threads) { - jvmtiStackTraces traces; + jvmtiAllocTraceInfo* traces; + jint trace_counter; + ThreadsFound thread_data; thread_data.threads = (*env)->GetIntArrayElements(env, threads, 0); thread_data.num_threads = 0; // Get live and garbage traces to ensure we capture all the threads that have // been sampled. 
- if ((*jvmti)->GetLiveTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + if ((*jvmti)->GetObjectAllocTraces(jvmti, &traces, &trace_counter) != JVMTI_ERROR_NONE) { return FALSE; } - find_threads_in_traces(&traces, &thread_data); + find_threads_in_traces(traces, trace_counter, &thread_data); - if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) { return FALSE; } - if ((*jvmti)->GetGarbageTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + if ((*jvmti)->GetGarbageTraces(jvmti, &traces, &trace_counter) != JVMTI_ERROR_NONE) { return FALSE; } - find_threads_in_traces(&traces, &thread_data); + find_threads_in_traces(traces, trace_counter, &thread_data); - if ((*jvmti)->ReleaseTraces(jvmti, &traces) != JVMTI_ERROR_NONE) { + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) traces) != JVMTI_ERROR_NONE) { return FALSE; } @@ -721,30 +738,37 @@ JNIEXPORT void JNICALL Java_MyPackage_HeapMonitorCachedTest_getLiveTracesToForceGc(JNIEnv *env, jclass cls) { - jvmtiStackTraces live_traces; - jvmtiError error = (*jvmti)->GetLiveTraces(jvmti, &live_traces); + jvmtiAllocTraceInfo* traces; + jint trace_counter; + + jvmtiError error = (*jvmti)->GetObjectAllocTraces(jvmti, &traces, + &trace_counter); if (error != JVMTI_ERROR_NONE) { return; } - (*jvmti)->ReleaseTraces(jvmti, &live_traces); + (*jvmti)->Deallocate(jvmti, (unsigned char*) traces); } -static jboolean compare_traces(jvmtiStackTraces* traces, - jvmtiStackTraces* other_traces, +static jboolean compare_traces(jvmtiAllocTraceInfo* traces, + int trace_count, + jvmtiAllocTraceInfo* other_traces, + int other_trace_count, int print_out_comparisons) { - int trace_count = traces->trace_count; - if (trace_count != other_traces->trace_count) { + if (trace_count != other_trace_count) { return FALSE; } int i; for (i = 0; i < trace_count; i++) { - jvmtiStackTrace* trace = traces->stack_traces + i; - jvmtiStackTrace* other_trace = 
other_traces->stack_traces + i; + jvmtiAllocTraceInfo* trace = traces + i; + jvmtiAllocTraceInfo* other_trace = other_traces + i; - if (trace->frame_count != other_trace->frame_count) { + jvmtiStackInfo* stack_info = trace->stack_info; + jvmtiStackInfo* other_stack_info = other_trace->stack_info; + + if (stack_info->frame_count != other_stack_info->frame_count) { return FALSE; } @@ -756,9 +780,9 @@ return FALSE; } - jvmtiFrameInfo* frames = trace->frames; - jvmtiFrameInfo* other_frames = other_trace->frames; - if (memcmp(frames, other_frames, sizeof(*frames) * trace->frame_count)) { + jvmtiFrameInfo* frames = stack_info->frame_buffer; + jvmtiFrameInfo* other_frames = other_stack_info->frame_buffer; + if (memcmp(frames, other_frames, sizeof(*frames) * stack_info->frame_count)) { return FALSE; } } @@ -770,24 +794,34 @@ Java_MyPackage_HeapMonitorCachedTest_cachedAndLiveAreSame(JNIEnv *env, jclass cls) { // Get cached first, then get live (since live performs a GC). - jvmtiStackTraces cached_traces; - jvmtiError error = (*jvmti)->GetCachedTraces(jvmti, &cached_traces); + jvmtiAllocTraceInfo* cached_traces; + jint cached_trace_counter; + jvmtiError error = (*jvmti)->GetCachedObjectAllocTraces(jvmti, &cached_traces, + &cached_trace_counter); if (error != JVMTI_ERROR_NONE) { return FALSE; } - jvmtiStackTraces live_traces; - error = (*jvmti)->GetLiveTraces(jvmti, &live_traces); + jvmtiAllocTraceInfo* live_traces; + jint live_trace_counter; + error = (*jvmti)->GetObjectAllocTraces(jvmti, &live_traces, + &live_trace_counter); if (error != JVMTI_ERROR_NONE) { return FALSE; } - int result = compare_traces(&cached_traces, &live_traces, PRINT_OUT); + int result = compare_traces(cached_traces, cached_trace_counter, + live_traces, live_trace_counter, + PRINT_OUT); - (*jvmti)->ReleaseTraces(jvmti, &cached_traces); - (*jvmti)->ReleaseTraces(jvmti, &live_traces); + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) cached_traces) != JVMTI_ERROR_NONE) { + return FALSE; + } + if 
((*jvmti)->Deallocate(jvmti, (unsigned char*) live_traces) != JVMTI_ERROR_NONE) { + return FALSE; + } return result; } @@ -795,21 +829,22 @@ return hash_code * 31 + value; } -static long get_hash_code(jvmtiStackTraces* traces) { - int trace_count = traces->trace_count; +static long get_hash_code(jvmtiAllocTraceInfo* traces, jint trace_counter) { int hash_code = 17; + int i, j; - int i; - hash_code = hash(hash_code, trace_count); - for (i = 0; i < trace_count; i++) { - jvmtiStackTrace* trace = traces->stack_traces + i; - hash_code = hash(hash_code, trace->frame_count); + hash_code = hash(hash_code, trace_counter); + for (i = 0; i < trace_counter; i++) { + jvmtiAllocTraceInfo* trace = traces + i; + hash_code = hash(hash_code, trace->size); hash_code = hash(hash_code, trace->thread_id); - int j; - int frame_count = trace->frame_count; - jvmtiFrameInfo* frames = trace->frames; + jvmtiStackInfo* stack_info = trace->stack_info; + hash_code = hash(hash_code, stack_info->frame_count); + + int frame_count = stack_info->frame_count; + jvmtiFrameInfo* frames = stack_info->frame_buffer; hash_code = hash(hash_code, frame_count); for (j = 0; j < frame_count; j++) { hash_code = hash(hash_code, (long) frames[i].method); @@ -824,15 +859,20 @@ Java_MyPackage_HeapMonitorCachedTest_getCachedHashCode(JNIEnv *env, jclass cls) { // Get cached first, then get live. - jvmtiStackTraces cached_traces; - jvmtiError error = (*jvmti)->GetCachedTraces(jvmti, &cached_traces); + jvmtiAllocTraceInfo* cached_traces; + jint cached_trace_counter; + jvmtiError error = (*jvmti)->GetCachedObjectAllocTraces(jvmti, &cached_traces, + &cached_trace_counter); if (error != JVMTI_ERROR_NONE) { return 0; } - long hash_code = get_hash_code(&cached_traces); - (*jvmti)->ReleaseTraces(jvmti, &cached_traces); + long hash_code = get_hash_code(cached_traces, cached_trace_counter); + + if ((*jvmti)->Deallocate(jvmti, (unsigned char*) cached_traces) != JVMTI_ERROR_NONE) { + return FALSE; + } return hash_code; }