--- old/src/hotspot/share/runtime/heapMonitoring.cpp	2018-02-12 20:05:12.179739036 -0800
+++ new/src/hotspot/share/runtime/heapMonitoring.cpp	2018-02-12 20:05:11.915740053 -0800
@@ -26,6 +26,7 @@
 
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/universe.hpp"
+#include "prims/jvmtiEnvBase.hpp"
 #include "runtime/heapMonitoring.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vframe.hpp"
@@ -35,17 +36,17 @@
 // Internal data structure representing traces, used when object has been GC'd.
 class StackTraceData : public CHeapObj<mtInternal> {
  private:
-  jvmtiStackTrace* _trace;
+  jvmtiAllocTraceInfo* _trace;
   int _references;
 
  public:
-  StackTraceData(jvmtiStackTrace* t) : _trace(t), _references(0) {}
+  StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {}
 
   void increment_reference_count() {
     _references++;
   }
 
-  jvmtiStackTrace* get_trace() const {
+  jvmtiAllocTraceInfo* get_trace() const {
     return _trace;
   }
 
@@ -57,7 +58,9 @@
     data->_references--;
     if (data->_references == 0) {
       if (data->_trace != NULL) {
-        FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->_trace->frames);
+        jvmtiStackInfo* stack_info = data->_trace->stack_info;
+        FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer);
+        FREE_C_HEAP_OBJ(stack_info);
         FREE_C_HEAP_OBJ(data->_trace);
       }
       delete data;
@@ -73,7 +76,7 @@
   oop _obj;
 
  public:
-  StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) {
+  StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) {
     store_oop(o);
   }
 
@@ -218,23 +221,31 @@
  public:
   // The function that gets called to add a trace to the list of
   // traces we are maintaining.
-  void add_trace(jvmtiStackTrace* trace, oop o);
+  void add_trace(jvmtiAllocTraceInfo* trace, oop o);
 
   // The function that gets called by the client to retrieve the list
-  // of stack traces. Passes a jvmtiStackTraces which will get mutated.
-  void get_all_stack_traces(jvmtiStackTraces* traces);
+  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+  void get_all_stack_traces(JvmtiEnv* env,
+                            jvmtiAllocTraceInfo** traces,
+                            jint* trace_counter_ptr);
 
   // The function that gets called by the client to retrieve the list
-  // of stack traces. Passes a jvmtiStackTraces which will get mutated.
-  void get_garbage_stack_traces(jvmtiStackTraces* traces);
+  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+  void get_garbage_stack_traces(JvmtiEnv* env,
+                                jvmtiAllocTraceInfo** traces,
+                                jint* trace_counter_ptr);
 
   // The function that gets called by the client to retrieve the list
-  // of stack traces. Passes a jvmtiStackTraces which will get mutated.
-  void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces);
+  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+  void get_frequent_garbage_stack_traces(JvmtiEnv* env,
+                                         jvmtiAllocTraceInfo** traces,
+                                         jint* trace_counter_ptr);
 
   // The function that gets called by the client to retrieve the list
-  // of stack traces. Passes a jvmtiStackTraces which will get mutated.
-  void get_cached_stack_traces(jvmtiStackTraces* traces);
+  // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+  void get_cached_stack_traces(JvmtiEnv* env,
+                               jvmtiAllocTraceInfo** traces,
+                               jint* trace_counter_ptr);
 
   // Executes whenever weak references are traversed. is_alive tells
   // you if the given oop is still reachable and live.
@@ -271,6 +282,7 @@
 
   bool initialized() {
     return OrderAccess::load_acquire(&_initialized) != 0;
+    return _initialized;
   }
 
  private:
@@ -326,12 +338,11 @@
     int _size;
   };
 
-  // Copies from StackTraceData to jvmtiStackTrace.
-  bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from);
-
   // Creates a deep copy of the list of StackTraceData.
-  void copy_stack_traces(const StackTraceDataCopier &copier,
-                         jvmtiStackTraces* traces);
+  void copy_stack_traces(JvmtiEnv* env,
+                         const StackTraceDataCopier &copier,
+                         jvmtiAllocTraceInfo** traces,
+                         jint* trace_counter_ptr);
 
   void store_garbage_trace(const StackTraceDataWithOop &trace);
 
@@ -340,6 +351,20 @@
   void reset();
 
   void allocate_storage(int max_gc_storage);
+
+  int calculate_frame_count(const StackTraceDataCopier &copier);
+  int calculate_info_count(const StackTraceDataCopier &copier);
+
+  bool copy_frame(const StackTraceData* stack_trace_data,
+                  jvmtiAllocTraceInfo* current_alloc_traces,
+                  jvmtiStackInfo* current_stack_info,
+                  jvmtiFrameInfo* current_frame_info);
+
+  // Returns frame copy success. Failure can result when there is no longer
+  // enough memory.
+  bool copy_frames(const StackTraceDataCopier& copier, int info_count,
+                   unsigned char* start,
+                   unsigned char* end);
 };
 
 StackTraceStorage* StackTraceStorage::internal_storage;
@@ -353,10 +378,15 @@
 uint64_t HeapMonitoring::_rnd;
 
 StackTraceStorage::StackTraceStorage() {
+  MutexLocker mu(HeapMonitorStorage_lock);
   reset();
 }
 
 void StackTraceStorage::reset() {
+  assert(HeapMonitorStorage_lock->owned_by_self()
+         || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
+         "This should not be accessed concurrently");
+
   _allocated_traces = NULL;
   _traces_on_last_full_gc = NULL;
   _recent_garbage_traces = NULL;
@@ -415,6 +445,10 @@
 }
 
 void StackTraceStorage::allocate_storage(int max_gc_storage) {
+  assert(HeapMonitorStorage_lock->owned_by_self()
+         || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
+         "This should not be accessed concurrently");
+
   // In case multiple threads got locked and then 1 by 1 got through.
   if (initialized()) {
     return;
@@ -433,7 +467,7 @@
   OrderAccess::release_store(&_initialized, 1);
 }
 
-void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
+void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
   MutexLocker mu(HeapMonitorStorage_lock);
   // Last minute check on initialization here in case:
   // Between the moment object_alloc_do_sample's check for initialization
@@ -441,7 +475,7 @@
   if (initialized()) {
     StackTraceDataWithOop new_data(trace, o);
     _stats.sample_count++;
-    _stats.stack_depth_accumulation += trace->frame_count;
+    _stats.stack_depth_accumulation += trace->stack_info->frame_count;
     _allocated_traces->append(new_data);
   }
 }
@@ -491,112 +525,201 @@
   log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count);
 }
 
-bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
-                                  const StackTraceData* from) {
-  const jvmtiStackTrace* src = from->get_trace();
-  *to = *src;
-
-  to->frames =
-      NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
-
-  if (to->frames == NULL) {
-    return false;
-  }
-
-  memcpy(to->frames,
-         src->frames,
-         sizeof(jvmtiFrameInfo) * src->frame_count);
-  return true;
-}
-
 // Called by the outside world; returns a copy of the stack traces
 // (because we could be replacing them as the user handles them).
 // The array is secretly null-terminated (to make it easier to reclaim).
-void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) {
+void StackTraceStorage::get_all_stack_traces(JvmtiEnv* env,
+                                             jvmtiAllocTraceInfo** traces,
+                                             jint* trace_counter_ptr) {
   MutexLocker mu(HeapMonitorStorage_lock);
   if (!_allocated_traces) {
-    traces->stack_traces = NULL;
-    traces->trace_count = 0;
+    *traces = NULL;
+    *trace_counter_ptr = 0;
     return;
   }
 
   LiveStackTraceDataCopier copier(_allocated_traces);
-  copy_stack_traces(copier, traces);
+  copy_stack_traces(env, copier, traces, trace_counter_ptr);
 }
 
 // See comment on get_all_stack_traces
-void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) {
+void StackTraceStorage::get_garbage_stack_traces(JvmtiEnv* env,
+                                                 jvmtiAllocTraceInfo** traces,
+                                                 jint* trace_counter_ptr) {
   MutexLocker mu(HeapMonitorStorage_lock);
   if (!_recent_garbage_traces) {
-    traces->stack_traces = NULL;
-    traces->trace_count = 0;
+    *traces = NULL;
+    *trace_counter_ptr = 0;
    return;
   }
 
   GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
                                      _recent_garbage_traces->size());
-  copy_stack_traces(copier, traces);
+  copy_stack_traces(env, copier, traces, trace_counter_ptr);
 }
 
 // See comment on get_all_stack_traces
 void StackTraceStorage::get_frequent_garbage_stack_traces(
-    jvmtiStackTraces* traces) {
+    JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
   MutexLocker mu(HeapMonitorStorage_lock);
   if (!_frequent_garbage_traces) {
-    traces->stack_traces = NULL;
-    traces->trace_count = 0;
+    *traces = NULL;
+    *trace_counter_ptr = 0;
     return;
   }
 
   GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
                                      _frequent_garbage_traces->size());
-  copy_stack_traces(copier, traces);
+  copy_stack_traces(env, copier, traces, trace_counter_ptr);
 }
 
 // See comment on get_all_stack_traces
-void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) {
+void StackTraceStorage::get_cached_stack_traces(JvmtiEnv* env,
+                                                jvmtiAllocTraceInfo** traces,
+                                                jint* trace_counter_ptr) {
   MutexLocker mu(HeapMonitorStorage_lock);
   if (!_traces_on_last_full_gc) {
-    traces->stack_traces = NULL;
-    traces->trace_count = 0;
+    *traces = NULL;
+    *trace_counter_ptr = 0;
     return;
   }
 
   LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
-  copy_stack_traces(copier, traces);
+  copy_stack_traces(env, copier, traces, trace_counter_ptr);
 }
 
-void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
-                                          jvmtiStackTraces* traces) {
+int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) {
   int len = copier.size();
-  // Create a new array to store the StackTraceData objects.
-  // + 1 for a NULL at the end.
-  jvmtiStackTrace* t =
-      NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
-  if (t == NULL) {
-    traces->stack_traces = NULL;
-    traces->trace_count = 0;
-    return;
+  // Walk the traces first to find the size of the frames as well.
+  int frame_total = 0;
+
+  for (int i = 0; i < len; i++) {
+    const StackTraceData* stack_trace = copier.get(i);
+
+    if (stack_trace != NULL) {
+      jvmtiAllocTraceInfo* trace = stack_trace->get_trace();
+      jvmtiStackInfo* stack_info = trace->stack_info;
+      frame_total += stack_info->frame_count;
+    }
   }
 
-  // +1 to have a NULL at the end of the array.
-  memset(t, 0, (len + 1) * sizeof(*t));
-  // Copy the StackTraceData objects into the new array.
-  int trace_count = 0;
+  return frame_total;
+}
+
+int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) {
+  int len = copier.size();
+
+  int info_total = 0;
+
   for (int i = 0; i < len; i++) {
     const StackTraceData* stack_trace = copier.get(i);
+
     if (stack_trace != NULL) {
-      jvmtiStackTrace* to = &t[trace_count];
-      if (!deep_copy(to, stack_trace)) {
-        continue;
+      // TODO: merge this with the method above.
+      info_total++;
+    }
+  }
+
+  return info_total;
+}
+
+// Method to test if the data structure would fit between the src address and
+// the end address.
+template<typename T, typename U>
+static bool next_ptr_less_or_equal(T src, U* end) {
+  return (src + 1) <= reinterpret_cast<T>(end);
+}
+
+bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data,
+                                   jvmtiAllocTraceInfo* current_alloc_trace,
+                                   jvmtiStackInfo* current_stack_info,
+                                   jvmtiFrameInfo* current_frame_info) {
+  jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace();
+  jvmtiStackInfo* stack_info = trace->stack_info;
+  int frame_count = stack_info->frame_count;
+
+  memcpy(current_alloc_trace, trace, sizeof(*trace));
+
+  current_alloc_trace->stack_info = current_stack_info;
+  memcpy(current_stack_info, stack_info, sizeof(*stack_info));
+
+  current_stack_info->frame_buffer = current_frame_info;
+  memcpy(current_frame_info, stack_info->frame_buffer,
+         sizeof(jvmtiFrameInfo) * frame_count);
+  return true;
+}
+
+bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier,
+                                    int info_count,
+                                    unsigned char* start,
+                                    unsigned char* end) {
+  jvmtiAllocTraceInfo* start_alloc_trace = reinterpret_cast<jvmtiAllocTraceInfo*>(start);
+  jvmtiStackInfo* start_stack_info = reinterpret_cast<jvmtiStackInfo*>(start_alloc_trace + info_count);
+  jvmtiFrameInfo* start_frame_info = reinterpret_cast<jvmtiFrameInfo*>(start_stack_info + info_count);
+
+  jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace;
+  jvmtiStackInfo* current_stack_info = start_stack_info;
+  jvmtiFrameInfo* current_frame_info = start_frame_info;
+
+  for (int i = 0; i < info_count; i++) {
+    assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info),
+           "jvmtiAllocTraceInfo would write over jvmtiStackInfos.");
+    assert(next_ptr_less_or_equal(current_stack_info, start_frame_info),
+           "jvmtiStackInfo would write over jvmtiFrameInfos.");
+
+    assert(next_ptr_less_or_equal(current_frame_info, end),
+           "jvmtiFrameInfo would write over the end of the buffer.");
+
+    const StackTraceData* stack_trace_data = copier.get(i);
+    if (stack_trace_data != NULL) {
+      if (!copy_frame(stack_trace_data, current_alloc_trace,
+                      current_stack_info, current_frame_info)) {
+        return false;
       }
-      trace_count++;
+
+      current_frame_info += current_stack_info->frame_count;
+      current_stack_info++;
+      current_alloc_trace++;
     }
   }
 
-  traces->stack_traces = t;
-  traces->trace_count = trace_count;
+  return true;
+}
+
+void StackTraceStorage::copy_stack_traces(JvmtiEnv* env,
+                                          const StackTraceDataCopier& copier,
+                                          jvmtiAllocTraceInfo** traces,
+                                          jint* trace_counter_ptr) {
+  *traces = NULL;
+  *trace_counter_ptr = 0;
+
+  int frame_total = calculate_frame_count(copier);
+  int len = calculate_info_count(copier);
+
+  // Allocate the whole stacktraces in one bloc to simplify freeing.
+  size_t total_size = len * sizeof(jvmtiAllocTraceInfo)
+      + len * sizeof(jvmtiStackInfo)
+      + frame_total * sizeof(jvmtiFrameInfo);
+
+  unsigned char* buffer = NULL;
+  jvmtiAllocTraceInfo* result = NULL;
+  JvmtiEnvBase* env_base = reinterpret_cast<JvmtiEnvBase*>(env);
+  env_base->allocate(total_size, &buffer);
+
+  if (buffer == NULL) {
+    return;
+  }
+
+  bool success = copy_frames(copier, len, buffer, buffer + total_size);
+
+  if (!success) {
+    env_base->deallocate(buffer);
+    return;
+  }
+
+  *trace_counter_ptr = len;
+  *traces = reinterpret_cast<jvmtiAllocTraceInfo*>(buffer);
 }
 
 void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
@@ -615,8 +738,12 @@
   _stats.garbage_collected_samples++;
 }
 
-void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) {
-  StackTraceStorage::storage()->get_all_stack_traces(traces);
+void HeapMonitoring::get_live_traces(JvmtiEnv* env,
+                                     jvmtiAllocTraceInfo** traces,
+                                     jint* trace_counter_ptr) {
+  StackTraceStorage::storage()->get_all_stack_traces(env,
+                                                     traces,
+                                                     trace_counter_ptr);
 }
 
 void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
@@ -625,30 +752,27 @@
   *stats = internal_stats;
 }
 
-void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) {
-  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
-}
-
-void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) {
-  StackTraceStorage::storage()->get_garbage_stack_traces(traces);
-}
-
-void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) {
-  StackTraceStorage::storage()->get_cached_stack_traces(traces);
-}
-
-void HeapMonitoring::release_traces(jvmtiStackTraces* traces) {
-  jint trace_count = traces->trace_count;
-  jvmtiStackTrace* stack_traces = traces->stack_traces;
-
-  for (jint i = 0; i < trace_count; i++) {
-    jvmtiStackTrace* current_trace = stack_traces + i;
-    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
-  }
-
-  FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
-  traces->trace_count = 0;
-  traces->stack_traces = NULL;
+void HeapMonitoring::get_frequent_garbage_traces(JvmtiEnv* env,
+                                                 jvmtiAllocTraceInfo** traces,
+                                                 jint* trace_counter_ptr) {
+  StackTraceStorage::storage()->get_frequent_garbage_stack_traces(
+      env, traces, trace_counter_ptr);
+}
+
+void HeapMonitoring::get_garbage_traces(JvmtiEnv* env,
+                                        jvmtiAllocTraceInfo** traces,
+                                        jint* trace_counter_ptr) {
+  StackTraceStorage::storage()->get_garbage_stack_traces(env,
+                                                         traces,
+                                                         trace_counter_ptr);
+}
+
+void HeapMonitoring::get_cached_traces(JvmtiEnv* env,
+                                       jvmtiAllocTraceInfo** traces,
+                                       jint* trace_counter_ptr) {
+  StackTraceStorage::storage()->get_cached_stack_traces(env,
+                                                        traces,
+                                                        trace_counter_ptr);
 }
 
 // Invoked by the GC to clean up old stack traces and remove old arrays
@@ -731,29 +855,37 @@
   StackTraceStorage::storage()->accumulate_sample_rate(rate);
 }
 
-void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) {
+void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) {
   JavaThread* thread = static_cast<JavaThread*>(t);
 
   if (StackTraceStorage::storage()->initialized()) {
     assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
     JavaThread* thread = static_cast<JavaThread*>(t);
 
-    jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
+    jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal);
+    if (trace == NULL) {
+      return;
+    }
+
+    jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal);
     if (trace == NULL) {
+      FREE_C_HEAP_OBJ(trace);
      return;
    }
+    trace->stack_info = stack_info;
 
     jvmtiFrameInfo* frames =
         NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
     if (frames == NULL) {
+      FREE_C_HEAP_OBJ(stack_info);
       FREE_C_HEAP_OBJ(trace);
       return;
     }
+    stack_info->frame_buffer = frames;
+    stack_info->frame_count = 0;
 
-    trace->frames = frames;
     trace->thread_id = SharedRuntime::get_java_tid(thread);
     trace->size = byte_size;
-    trace->frame_count = 0;
 
     if (thread->has_last_Java_frame()) { // just to be safe
       vframeStream vfst(thread, true);
@@ -766,17 +898,18 @@
         vfst.next();
       }
 
-      trace->frame_count = count;
+      stack_info->frame_count = count;
     }
 
-    if (trace->frame_count> 0) {
+    if (stack_info->frame_count > 0) {
      // Success!
       StackTraceStorage::storage()->add_trace(trace, o);
       return;
     }
 
     // Failure!
-    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
+    FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
+    FREE_C_HEAP_OBJ(stack_info);
     FREE_C_HEAP_OBJ(trace);
   }
 }
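
Note (illustration only, not part of the patch above): the new copy_stack_traces()/copy_frames() path lays every result out in a single buffer: all jvmtiAllocTraceInfo records first, then all jvmtiStackInfo records, then all jvmtiFrameInfo slots, so the caller can release the whole result with one Deallocate. The standalone sketch below shows that layout with simplified stand-in structs (FrameInfo/StackInfo/TraceInfo are assumptions for illustration, not the real JVMTI types) and plain calloc/free in place of the JvmtiEnv allocator.

#include <stdio.h>
#include <stdlib.h>

// Simplified stand-ins for the JVMTI structures used by the patch.
struct FrameInfo { void* method; int bci; };
struct StackInfo { FrameInfo* frame_buffer; int frame_count; };
struct TraceInfo { StackInfo* stack_info; long thread_id; };

int main() {
  const int info_count = 2;            // number of sampled traces
  const int frame_counts[2] = {2, 3};  // frames recorded per trace
  const int frame_total = 5;           // sum of frame_counts

  // One block: trace infos, then stack infos, then frame slots.
  size_t total_size = info_count * sizeof(TraceInfo)
      + info_count * sizeof(StackInfo)
      + frame_total * sizeof(FrameInfo);
  unsigned char* buffer = (unsigned char*) calloc(1, total_size);
  if (buffer == NULL) {
    return 1;
  }

  TraceInfo* traces = (TraceInfo*) buffer;
  StackInfo* stacks = (StackInfo*) (traces + info_count);
  FrameInfo* frames = (FrameInfo*) (stacks + info_count);

  // Wire trace i to stack info i and hand it a slice of the frame area,
  // mirroring what copy_frame()/copy_frames() do in the patch.
  FrameInfo* next_frame = frames;
  for (int i = 0; i < info_count; i++) {
    traces[i].stack_info = &stacks[i];
    stacks[i].frame_buffer = next_frame;
    stacks[i].frame_count = frame_counts[i];
    next_frame += frame_counts[i];
  }

  printf("%zu bytes hold %d traces and %d frames\n",
         total_size, info_count, frame_total);
  free(buffer);  // one call frees trace infos, stack infos and frames together
  return 0;
}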