< prev index next >
src/hotspot/share/runtime/heapMonitoring.cpp
Print this page
rev 48551 : [mq]: heap8
rev 48552 : [mq]: heap10a
rev 48553 : [mq]: heap14_rebased
rev 48555 : [mq]: heap16
rev 48556 : [mq]: heap17
rev 48557 : [mq]: heap17
rev 48558 : [mq]: heap19
rev 48559 : [mq]: heap20
rev 48560 : [mq]: heap21
rev 48562 : [mq]: heap23
@@ -24,30 +24,31 @@
#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
+#include "prims/jvmtiEnvBase.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vframe.hpp"
static const int MaxStackDepth = 1024;
// Internal data structure representing traces, used when object has been GC'd.
class StackTraceData : public CHeapObj<mtInternal> {
private:
- jvmtiStackTrace* _trace;
+ jvmtiAllocTraceInfo* _trace;
int _references;
public:
- StackTraceData(jvmtiStackTrace* t) : _trace(t), _references(0) {}
+ StackTraceData(jvmtiAllocTraceInfo* t) : _trace(t), _references(0) {}
void increment_reference_count() {
_references++;
}
- jvmtiStackTrace* get_trace() const {
+ jvmtiAllocTraceInfo* get_trace() const {
return _trace;
}
static void unreference_and_free(StackTraceData* data) {
if (!data) {
@@ -55,11 +56,13 @@
}
data->_references--;
if (data->_references == 0) {
if (data->_trace != NULL) {
- FREE_C_HEAP_ARRAY(jvmtiFrameInfo, data->_trace->frames);
+ jvmtiStackInfo* stack_info = data->_trace->stack_info;
+ FREE_C_HEAP_ARRAY(jvmtiFrameInfo, stack_info->frame_buffer);
+ FREE_C_HEAP_OBJ(stack_info);
FREE_C_HEAP_OBJ(data->_trace);
}
delete data;
}
}
@@ -71,11 +74,11 @@
class StackTraceDataWithOop : public StackTraceData {
private:
oop _obj;
public:
- StackTraceDataWithOop(jvmtiStackTrace* t, oop o) : StackTraceData(t) {
+ StackTraceDataWithOop(jvmtiAllocTraceInfo* t, oop o) : StackTraceData(t) {
store_oop(o);
}
StackTraceDataWithOop() : StackTraceData(NULL), _obj(NULL) {
}
@@ -216,27 +219,35 @@
// Each object that we profile is stored as trace with the thread_id.
class StackTraceStorage : public CHeapObj<mtInternal> {
public:
// The function that gets called to add a trace to the list of
// traces we are maintaining.
- void add_trace(jvmtiStackTrace* trace, oop o);
+ void add_trace(jvmtiAllocTraceInfo* trace, oop o);
// The function that gets called by the client to retrieve the list
- // of stack traces. Passes a jvmtiStackTraces which will get mutated.
- void get_all_stack_traces(jvmtiStackTraces* traces);
+ // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+ void get_all_stack_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr);
// The function that gets called by the client to retrieve the list
- // of stack traces. Passes a jvmtiStackTraces which will get mutated.
- void get_garbage_stack_traces(jvmtiStackTraces* traces);
+ // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+ void get_garbage_stack_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr);
// The function that gets called by the client to retrieve the list
- // of stack traces. Passes a jvmtiStackTraces which will get mutated.
- void get_frequent_garbage_stack_traces(jvmtiStackTraces* traces);
+ // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+ void get_frequent_garbage_stack_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr);
// The function that gets called by the client to retrieve the list
- // of stack traces. Passes a jvmtiStackTraces which will get mutated.
- void get_cached_stack_traces(jvmtiStackTraces* traces);
+ // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
+ void get_cached_stack_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr);
// Executes whenever weak references are traversed. is_alive tells
// you if the given oop is still reachable and live.
void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
@@ -269,10 +280,11 @@
_stats.sample_rate_count++;
}
bool initialized() {
return OrderAccess::load_acquire(&_initialized) != 0;
+ return _initialized;
}
private:
// The traces currently sampled.
GrowableArray<StackTraceDataWithOop>* _allocated_traces;
@@ -324,24 +336,37 @@
private:
StackTraceData** _data;
int _size;
};
- // Copies from StackTraceData to jvmtiStackTrace.
- bool deep_copy(jvmtiStackTrace* to, const StackTraceData* from);
-
// Creates a deep copy of the list of StackTraceData.
- void copy_stack_traces(const StackTraceDataCopier &copier,
- jvmtiStackTraces* traces);
+ void copy_stack_traces(JvmtiEnv* env,
+ const StackTraceDataCopier &copier,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr);
void store_garbage_trace(const StackTraceDataWithOop &trace);
void free_garbage();
void free_storage();
void reset();
void allocate_storage(int max_gc_storage);
+
+ int calculate_frame_count(const StackTraceDataCopier &copier);
+ int calculate_info_count(const StackTraceDataCopier &copier);
+
+ bool copy_frame(const StackTraceData* stack_trace_data,
+ jvmtiAllocTraceInfo* current_alloc_traces,
+ jvmtiStackInfo* current_stack_info,
+ jvmtiFrameInfo* current_frame_info);
+
+  // Returns whether the frame copy succeeded; it can fail when there is
+  // insufficient memory left in the destination buffer.
+ bool copy_frames(const StackTraceDataCopier& copier, int info_count,
+ unsigned char* start,
+ unsigned char* end);
};
StackTraceStorage* StackTraceStorage::internal_storage;
// Statics for Sampler
@@ -351,14 +376,19 @@
// Cheap random number generator
uint64_t HeapMonitoring::_rnd;
StackTraceStorage::StackTraceStorage() {
+ MutexLocker mu(HeapMonitorStorage_lock);
reset();
}
void StackTraceStorage::reset() {
+ assert(HeapMonitorStorage_lock->owned_by_self()
+ || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
+ "This should not be accessed concurrently");
+
_allocated_traces = NULL;
_traces_on_last_full_gc = NULL;
_recent_garbage_traces = NULL;
_frequent_garbage_traces = NULL;
_max_gc_storage = 0;
@@ -413,10 +443,14 @@
MutexLocker mu(HeapMonitorStorage_lock);
free_storage();
}
void StackTraceStorage::allocate_storage(int max_gc_storage) {
+ assert(HeapMonitorStorage_lock->owned_by_self()
+ || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
+ "This should not be accessed concurrently");
+
// In case multiple threads got locked and then 1 by 1 got through.
if (initialized()) {
return;
}
@@ -431,19 +465,19 @@
_max_gc_storage = max_gc_storage;
memset(&_stats, 0, sizeof(_stats));
OrderAccess::release_store(&_initialized, 1);
}
-void StackTraceStorage::add_trace(jvmtiStackTrace* trace, oop o) {
+void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
MutexLocker mu(HeapMonitorStorage_lock);
// Last minute check on initialization here in case:
// Between the moment object_alloc_do_sample's check for initialization
// and now, there was a stop() that deleted the data.
if (initialized()) {
StackTraceDataWithOop new_data(trace, o);
_stats.sample_count++;
- _stats.stack_depth_accumulation += trace->frame_count;
+ _stats.stack_depth_accumulation += trace->stack_info->frame_count;
_allocated_traces->append(new_data);
}
}
void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
@@ -489,116 +523,205 @@
}
log_develop_trace(gc, ref)("Clearing HeapMonitoring weak reference (" INT64_FORMAT ")", count);
}
-bool StackTraceStorage::deep_copy(jvmtiStackTrace* to,
- const StackTraceData* from) {
- const jvmtiStackTrace* src = from->get_trace();
- *to = *src;
-
- to->frames =
- NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
-
- if (to->frames == NULL) {
- return false;
- }
-
- memcpy(to->frames,
- src->frames,
- sizeof(jvmtiFrameInfo) * src->frame_count);
- return true;
-}
-
// Called by the outside world; returns a copy of the stack traces
// (because we could be replacing them as the user handles them).
// The array is secretly null-terminated (to make it easier to reclaim).
-void StackTraceStorage::get_all_stack_traces(jvmtiStackTraces* traces) {
+void StackTraceStorage::get_all_stack_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
MutexLocker mu(HeapMonitorStorage_lock);
if (!_allocated_traces) {
- traces->stack_traces = NULL;
- traces->trace_count = 0;
+ *traces = NULL;
+ *trace_counter_ptr = 0;
return;
}
LiveStackTraceDataCopier copier(_allocated_traces);
- copy_stack_traces(copier, traces);
+ copy_stack_traces(env, copier, traces, trace_counter_ptr);
}
// See comment on get_all_stack_traces
-void StackTraceStorage::get_garbage_stack_traces(jvmtiStackTraces* traces) {
+void StackTraceStorage::get_garbage_stack_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
MutexLocker mu(HeapMonitorStorage_lock);
if (!_recent_garbage_traces) {
- traces->stack_traces = NULL;
- traces->trace_count = 0;
+ *traces = NULL;
+ *trace_counter_ptr = 0;
return;
}
GarbageStackTraceDataCopier copier(_recent_garbage_traces->get_traces(),
_recent_garbage_traces->size());
- copy_stack_traces(copier, traces);
+ copy_stack_traces(env, copier, traces, trace_counter_ptr);
}
// See comment on get_all_stack_traces
void StackTraceStorage::get_frequent_garbage_stack_traces(
- jvmtiStackTraces* traces) {
+ JvmtiEnv* env, jvmtiAllocTraceInfo** traces, jint* trace_counter_ptr) {
MutexLocker mu(HeapMonitorStorage_lock);
if (!_frequent_garbage_traces) {
- traces->stack_traces = NULL;
- traces->trace_count = 0;
+ *traces = NULL;
+ *trace_counter_ptr = 0;
return;
}
GarbageStackTraceDataCopier copier(_frequent_garbage_traces->get_traces(),
_frequent_garbage_traces->size());
- copy_stack_traces(copier, traces);
+ copy_stack_traces(env, copier, traces, trace_counter_ptr);
}
// See comment on get_all_stack_traces
-void StackTraceStorage::get_cached_stack_traces(jvmtiStackTraces* traces) {
+void StackTraceStorage::get_cached_stack_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
MutexLocker mu(HeapMonitorStorage_lock);
if (!_traces_on_last_full_gc) {
- traces->stack_traces = NULL;
- traces->trace_count = 0;
+ *traces = NULL;
+ *trace_counter_ptr = 0;
return;
}
LiveStackTraceDataCopier copier(_traces_on_last_full_gc);
- copy_stack_traces(copier, traces);
+ copy_stack_traces(env, copier, traces, trace_counter_ptr);
}
-void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
- jvmtiStackTraces* traces) {
+int StackTraceStorage::calculate_frame_count(const StackTraceDataCopier &copier) {
int len = copier.size();
- // Create a new array to store the StackTraceData objects.
- // + 1 for a NULL at the end.
- jvmtiStackTrace* t =
- NEW_C_HEAP_ARRAY(jvmtiStackTrace, len + 1, mtInternal);
- if (t == NULL) {
- traces->stack_traces = NULL;
- traces->trace_count = 0;
- return;
+ // Walk the traces first to find the size of the frames as well.
+ int frame_total = 0;
+
+ for (int i = 0; i < len; i++) {
+ const StackTraceData* stack_trace = copier.get(i);
+
+ if (stack_trace != NULL) {
+ jvmtiAllocTraceInfo* trace = stack_trace->get_trace();
+ jvmtiStackInfo* stack_info = trace->stack_info;
+ frame_total += stack_info->frame_count;
+ }
}
- // +1 to have a NULL at the end of the array.
- memset(t, 0, (len + 1) * sizeof(*t));
- // Copy the StackTraceData objects into the new array.
- int trace_count = 0;
+ return frame_total;
+}
+
+int StackTraceStorage::calculate_info_count(const StackTraceDataCopier &copier) {
+ int len = copier.size();
+
+ int info_total = 0;
+
for (int i = 0; i < len; i++) {
const StackTraceData* stack_trace = copier.get(i);
+
if (stack_trace != NULL) {
- jvmtiStackTrace* to = &t[trace_count];
- if (!deep_copy(to, stack_trace)) {
- continue;
+ // TODO: merge this with the method above.
+ info_total++;
+ }
+ }
+
+ return info_total;
+}
+
+// Method to test if the data structure would fit between the src address and
+// the end address.
+template<typename T, typename U>
+static bool next_ptr_less_or_equal(T src, U* end) {
+ return (src + 1) <= reinterpret_cast<T>(end);
+}
+
+bool StackTraceStorage::copy_frame(const StackTraceData* stack_trace_data,
+ jvmtiAllocTraceInfo* current_alloc_trace,
+ jvmtiStackInfo* current_stack_info,
+ jvmtiFrameInfo* current_frame_info) {
+ jvmtiAllocTraceInfo* trace = stack_trace_data->get_trace();
+ jvmtiStackInfo* stack_info = trace->stack_info;
+ int frame_count = stack_info->frame_count;
+
+ memcpy(current_alloc_trace, trace, sizeof(*trace));
+
+ current_alloc_trace->stack_info = current_stack_info;
+ memcpy(current_stack_info, stack_info, sizeof(*stack_info));
+
+ current_stack_info->frame_buffer = current_frame_info;
+ memcpy(current_frame_info, stack_info->frame_buffer,
+ sizeof(jvmtiFrameInfo) * frame_count);
+ return true;
+}
+
+bool StackTraceStorage::copy_frames(const StackTraceDataCopier& copier,
+ int info_count,
+ unsigned char* start,
+ unsigned char* end) {
+ jvmtiAllocTraceInfo* start_alloc_trace = reinterpret_cast<jvmtiAllocTraceInfo*>(start);
+ jvmtiStackInfo* start_stack_info = reinterpret_cast<jvmtiStackInfo*>(start_alloc_trace + info_count);
+ jvmtiFrameInfo* start_frame_info = reinterpret_cast<jvmtiFrameInfo*>(start_stack_info + info_count);
+
+ jvmtiAllocTraceInfo* current_alloc_trace = start_alloc_trace;
+ jvmtiStackInfo* current_stack_info = start_stack_info;
+ jvmtiFrameInfo* current_frame_info = start_frame_info;
+
+ for (int i = 0; i < info_count; i++) {
+ assert(next_ptr_less_or_equal(current_alloc_trace, start_stack_info),
+ "jvmtiAllocTraceInfo would write over jvmtiStackInfos.");
+ assert(next_ptr_less_or_equal(current_stack_info, start_frame_info),
+ "jvmtiStackInfo would write over jvmtiFrameInfos.");
+
+ assert(next_ptr_less_or_equal(current_frame_info, end),
+ "jvmtiFrameInfo would write over the end of the buffer.");
+
+ const StackTraceData* stack_trace_data = copier.get(i);
+ if (stack_trace_data != NULL) {
+ if (!copy_frame(stack_trace_data, current_alloc_trace,
+ current_stack_info, current_frame_info)) {
+ return false;
}
- trace_count++;
+
+ current_frame_info += current_stack_info->frame_count;
+ current_stack_info++;
+ current_alloc_trace++;
}
}
- traces->stack_traces = t;
- traces->trace_count = trace_count;
+ return true;
+}
+
+void StackTraceStorage::copy_stack_traces(JvmtiEnv* env,
+ const StackTraceDataCopier& copier,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
+ *traces = NULL;
+ *trace_counter_ptr = 0;
+
+ int frame_total = calculate_frame_count(copier);
+ int len = calculate_info_count(copier);
+
+  // Allocate all the stack traces in one block to simplify freeing.
+ size_t total_size = len * sizeof(jvmtiAllocTraceInfo)
+ + len * sizeof(jvmtiStackInfo)
+ + frame_total * sizeof(jvmtiFrameInfo);
+
+ unsigned char* buffer = NULL;
+ jvmtiAllocTraceInfo* result = NULL;
+ JvmtiEnvBase* env_base = reinterpret_cast<JvmtiEnvBase*>(env);
+ env_base->allocate(total_size, &buffer);
+
+ if (buffer == NULL) {
+ return;
+ }
+
+ bool success = copy_frames(copier, len, buffer, buffer + total_size);
+
+ if (!success) {
+ env_base->deallocate(buffer);
+ return;
+ }
+
+ *trace_counter_ptr = len;
+ *traces = reinterpret_cast<jvmtiAllocTraceInfo*>(buffer);
}
void StackTraceStorage::store_garbage_trace(const StackTraceDataWithOop &trace) {
StackTraceData* new_trace = new StackTraceData(trace.get_trace());
@@ -613,44 +736,45 @@
}
_stats.garbage_collected_samples++;
}
-void HeapMonitoring::get_live_traces(jvmtiStackTraces* traces) {
- StackTraceStorage::storage()->get_all_stack_traces(traces);
+void HeapMonitoring::get_live_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
+ StackTraceStorage::storage()->get_all_stack_traces(env,
+ traces,
+ trace_counter_ptr);
}
void HeapMonitoring::get_sampling_statistics(jvmtiHeapSamplingStats* stats) {
const jvmtiHeapSamplingStats& internal_stats =
StackTraceStorage::storage()->get_heap_sampling_stats();
*stats = internal_stats;
}
-void HeapMonitoring::get_frequent_garbage_traces(jvmtiStackTraces* traces) {
- StackTraceStorage::storage()->get_frequent_garbage_stack_traces(traces);
-}
-
-void HeapMonitoring::get_garbage_traces(jvmtiStackTraces* traces) {
- StackTraceStorage::storage()->get_garbage_stack_traces(traces);
-}
-
-void HeapMonitoring::get_cached_traces(jvmtiStackTraces* traces) {
- StackTraceStorage::storage()->get_cached_stack_traces(traces);
-}
-
-void HeapMonitoring::release_traces(jvmtiStackTraces* traces) {
- jint trace_count = traces->trace_count;
- jvmtiStackTrace* stack_traces = traces->stack_traces;
-
- for (jint i = 0; i < trace_count; i++) {
- jvmtiStackTrace* current_trace = stack_traces + i;
- FREE_C_HEAP_ARRAY(jvmtiFrameInfo, current_trace->frames);
- }
-
- FREE_C_HEAP_ARRAY(jvmtiStackTrace, traces->stack_traces);
- traces->trace_count = 0;
- traces->stack_traces = NULL;
+void HeapMonitoring::get_frequent_garbage_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
+ StackTraceStorage::storage()->get_frequent_garbage_stack_traces(
+ env, traces, trace_counter_ptr);
+}
+
+void HeapMonitoring::get_garbage_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
+ StackTraceStorage::storage()->get_garbage_stack_traces(env,
+ traces,
+ trace_counter_ptr);
+}
+
+void HeapMonitoring::get_cached_traces(JvmtiEnv* env,
+ jvmtiAllocTraceInfo** traces,
+ jint* trace_counter_ptr) {
+ StackTraceStorage::storage()->get_cached_stack_traces(env,
+ traces,
+ trace_counter_ptr);
}
// Invoked by the GC to clean up old stack traces and remove old arrays
// of instrumentation that are still lying around.
void HeapMonitoring::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
@@ -729,33 +853,41 @@
*ptr = rate;
StackTraceStorage::storage()->accumulate_sample_rate(rate);
}
-void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, intx byte_size) {
+void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) {
-  JavaThread* thread = static_cast<JavaThread*>(t);
if (StackTraceStorage::storage()->initialized()) {
assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
JavaThread* thread = static_cast<JavaThread*>(t);
- jvmtiStackTrace* trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
+ jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal);
+ if (trace == NULL) {
+ return;
+ }
+
+ jvmtiStackInfo* stack_info = NEW_C_HEAP_OBJ(jvmtiStackInfo, mtInternal);
if (trace == NULL) {
+ FREE_C_HEAP_OBJ(trace);
return;
}
+ trace->stack_info = stack_info;
jvmtiFrameInfo* frames =
NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);
if (frames == NULL) {
+ FREE_C_HEAP_OBJ(stack_info);
FREE_C_HEAP_OBJ(trace);
return;
}
+ stack_info->frame_buffer = frames;
+ stack_info->frame_count = 0;
- trace->frames = frames;
trace->thread_id = SharedRuntime::get_java_tid(thread);
trace->size = byte_size;
- trace->frame_count = 0;
if (thread->has_last_Java_frame()) { // just to be safe
vframeStream vfst(thread, true);
int count = 0;
while (!vfst.at_end() && count < MaxStackDepth) {
@@ -764,19 +896,20 @@
frames[count].method = m->jmethod_id();
count++;
vfst.next();
}
- trace->frame_count = count;
+ stack_info->frame_count = count;
}
- if (trace->frame_count> 0) {
+ if (stack_info->frame_count > 0) {
// Success!
StackTraceStorage::storage()->add_trace(trace, o);
return;
}
// Failure!
- FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
+ FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
+ FREE_C_HEAP_OBJ(stack_info);
FREE_C_HEAP_OBJ(trace);
}
}
< prev index next >