< prev index next >
src/hotspot/share/runtime/heapMonitoring.cpp
Print this page
*** 27,37 ****
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/vframe.hpp"
! const int MaxStackDepth = 1024;
// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
jvmtiStackTrace *trace;
oop obj;
--- 27,37 ----
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/vframe.hpp"
! static const int MaxStackDepth = 1024;
// Internal data structure representing traces.
struct StackTraceData : CHeapObj<mtInternal> {
jvmtiStackTrace *trace;
oop obj;
*** 202,246 ****
~StackTraceStorage();
StackTraceStorage();
static StackTraceStorage* storage() {
! if (internal_storage == NULL) {
! internal_storage = new StackTraceStorage();
! }
! return internal_storage;
! }
!
! static void reset_stack_trace_storage() {
! delete internal_storage;
! internal_storage = NULL;
}
! bool is_initialized() {
! return _initialized;
}
const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
return _stats;
}
- // Static method to set the storage in place at initialization.
- static void initialize_stack_trace_storage(int max_storage) {
- reset_stack_trace_storage();
- StackTraceStorage *storage = StackTraceStorage::storage();
- storage->initialize_storage(max_storage);
- }
-
void accumulate_sample_rate(size_t rate) {
_stats.sample_rate_accumulation += rate;
_stats.sample_rate_count++;
}
bool initialized() { return _initialized; }
volatile bool *initialized_address() { return &_initialized; }
private:
// The traces currently sampled.
GrowableArray<StackTraceData> *_allocated_traces;
// Recent garbage traces.
MostRecentGarbageTraces *_recent_garbage_traces;
--- 202,238 ----
~StackTraceStorage();
StackTraceStorage();
static StackTraceStorage* storage() {
! static StackTraceStorage internal_storage;
! return &internal_storage;
}
! void initialize(int max_storage) {
! MutexLocker mu(HeapMonitor_lock);
! free_storage();
! allocate_storage(max_storage);
! memset(&_stats, 0, sizeof(_stats));
}
const jvmtiHeapSamplingStats& get_heap_sampling_stats() const {
return _stats;
}
void accumulate_sample_rate(size_t rate) {
_stats.sample_rate_accumulation += rate;
_stats.sample_rate_count++;
}
bool initialized() { return _initialized; }
volatile bool *initialized_address() { return &_initialized; }
private:
+ // Protects the traces currently sampled (below).
+ volatile intptr_t _stack_storage_lock[1];
+
// The traces currently sampled.
GrowableArray<StackTraceData> *_allocated_traces;
// Recent garbage traces.
MostRecentGarbageTraces *_recent_garbage_traces;
*** 286,308 ****
private:
StackTraceData **_data;
int _size;
};
- // Instance initialization.
- void initialize_storage(int max_storage);
-
// Copies from StackTraceData to jvmtiStackTrace.
bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);
// Creates a deep copy of the list of StackTraceData.
void copy_stack_traces(const StackTraceDataCopier &copier,
jvmtiStackTraces *traces);
void store_garbage_trace(const StackTraceData &trace);
void free_garbage();
};
StackTraceStorage* StackTraceStorage::internal_storage;
// Statics for Sampler
--- 278,299 ----
private:
StackTraceData **_data;
int _size;
};
// Copies from StackTraceData to jvmtiStackTrace.
bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from);
// Creates a deep copy of the list of StackTraceData.
void copy_stack_traces(const StackTraceDataCopier &copier,
jvmtiStackTraces *traces);
void store_garbage_trace(const StackTraceData &trace);
void free_garbage();
+ void free_storage();
+ void allocate_storage(int max_gc_storage);
};
StackTraceStorage* StackTraceStorage::internal_storage;
// Statics for Sampler
*** 318,328 ****
_allocated_traces(NULL),
_recent_garbage_traces(NULL),
_frequent_garbage_traces(NULL),
_max_gc_storage(0),
_initialized(false) {
! memset(&_stats, 0, sizeof(_stats));
}
void StackTraceStorage::free_garbage() {
StackTraceData **recent_garbage = NULL;
uint32_t recent_size = 0;
--- 309,319 ----
_allocated_traces(NULL),
_recent_garbage_traces(NULL),
_frequent_garbage_traces(NULL),
_max_gc_storage(0),
_initialized(false) {
! _stack_storage_lock[0] = 0;
}
void StackTraceStorage::free_garbage() {
StackTraceData **recent_garbage = NULL;
uint32_t recent_size = 0;
*** 364,383 ****
}
}
}
}
! StackTraceStorage::~StackTraceStorage() {
delete _allocated_traces;
free_garbage();
delete _recent_garbage_traces;
delete _frequent_garbage_traces;
_initialized = false;
}
! void StackTraceStorage::initialize_storage(int max_gc_storage) {
// In case multiple threads got locked and then 1 by 1 got through.
if (_initialized) {
return;
}
--- 355,378 ----
}
}
}
}
! void StackTraceStorage::free_storage() {
delete _allocated_traces;
free_garbage();
delete _recent_garbage_traces;
delete _frequent_garbage_traces;
_initialized = false;
}
! StackTraceStorage::~StackTraceStorage() {
! free_storage();
! }
!
! void StackTraceStorage::allocate_storage(int max_gc_storage) {
// In case multiple threads got locked and then 1 by 1 got through.
if (_initialized) {
return;
}
*** 390,409 ****
_max_gc_storage = max_gc_storage;
_initialized = true;
}
void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
StackTraceData new_data(trace, o);
_stats.sample_count++;
_stats.stack_depth_accumulation += trace->frame_count;
_allocated_traces->append(new_data);
}
size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
OopClosure *f) {
size_t count = 0;
! if (is_initialized()) {
int len = _allocated_traces->length();
// Compact the oop traces. Moves the live oops to the beginning of the
// growable array, potentially overwriting the dead ones.
int curr_pos = 0;
--- 385,406 ----
_max_gc_storage = max_gc_storage;
_initialized = true;
}
void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
+ MutexLocker mu(HeapMonitor_lock);
StackTraceData new_data(trace, o);
_stats.sample_count++;
_stats.stack_depth_accumulation += trace->frame_count;
_allocated_traces->append(new_data);
}
size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive,
OopClosure *f) {
+ MutexLocker mu(HeapMonitor_lock);
size_t count = 0;
! if (initialized()) {
int len = _allocated_traces->length();
// Compact the oop traces. Moves the live oops to the beginning of the
// growable array, potentially overwriting the dead ones.
int curr_pos = 0;
*** 482,491 ****
--- 479,489 ----
}
void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier,
jvmtiStackTraces *traces) {
+ MutexLocker mu(HeapMonitor_lock);
int len = copier.size();
// Create a new array to store the StackTraceData objects.
// + 1 for a NULL at the end.
jvmtiStackTrace *t =
*** 590,602 ****
return;
}
_monitoring_rate = monitoring_rate;
- // Initialize and reset.
- StackTraceStorage::initialize_stack_trace_storage(max_gc_storage);
-
// Populate the lookup table for fast_log2.
// This approximates the log2 curve with a step function.
// Steps have height equal to log2 of the mid-point of the step.
for (int i = 0; i < (1 << FastLogNumBits); i++) {
double half_way = static_cast<double>(i + 0.5);
--- 588,597 ----
*** 606,615 ****
--- 601,612 ----
JavaThread *t = static_cast<JavaThread *>(Thread::current());
_rnd = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t));
if (_rnd == 0) {
_rnd = 1;
}
+
+ StackTraceStorage::storage()->initialize(max_gc_storage);
_enabled = true;
}
void HeapMonitoring::stop_profiling() {
_enabled = false;
*** 652,662 ****
}
void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
JavaThread *thread = static_cast<JavaThread *>(t);
! if (StackTraceStorage::storage()->is_initialized()) {
assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
JavaThread *thread = static_cast<JavaThread *>(t);
jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
if (trace == NULL) {
--- 649,659 ----
}
void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) {
#if defined(X86) || defined(PPC)
JavaThread *thread = static_cast<JavaThread *>(t);
! if (StackTraceStorage::storage()->initialized()) {
assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
JavaThread *thread = static_cast<JavaThread *>(t);
jvmtiStackTrace *trace = NEW_C_HEAP_OBJ(jvmtiStackTrace, mtInternal);
if (trace == NULL) {
*** 697,712 ****
}
// Failure!
FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames);
FREE_C_HEAP_OBJ(trace);
- return;
- } else {
- // There is something like 64K worth of allocation before the VM
- // initializes. This is just in the interests of not slowing down
- // startup.
- assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
}
#else
Unimplemented();
#endif
}
--- 694,703 ----
< prev index next >