--- old/src/hotspot/share/runtime/heapMonitoring.cpp 2017-10-06 14:50:10.842641126 -0700 +++ new/src/hotspot/share/runtime/heapMonitoring.cpp 2017-10-06 14:50:10.458642416 -0700 @@ -29,7 +29,7 @@ #include "runtime/heapMonitoring.hpp" #include "runtime/vframe.hpp" -const int MaxStackDepth = 1024; +static const int MaxStackDepth = 1024; // Internal data structure representing traces. struct StackTraceData : CHeapObj<mtInternal> { @@ -204,32 +204,21 @@ StackTraceStorage(); static StackTraceStorage* storage() { - if (internal_storage == NULL) { - internal_storage = new StackTraceStorage(); - } - return internal_storage; - } - - static void reset_stack_trace_storage() { - delete internal_storage; - internal_storage = NULL; + static StackTraceStorage internal_storage; + return &internal_storage; } - bool is_initialized() { - return _initialized; + void initialize(int max_storage) { + MutexLocker mu(HeapMonitor_lock); + free_storage(); + allocate_storage(max_storage); + memset(&_stats, 0, sizeof(_stats)); } const jvmtiHeapSamplingStats& get_heap_sampling_stats() const { return _stats; } - // Static method to set the storage in place at initialization. - static void initialize_stack_trace_storage(int max_storage) { - reset_stack_trace_storage(); - StackTraceStorage *storage = StackTraceStorage::storage(); - storage->initialize_storage(max_storage); - } - void accumulate_sample_rate(size_t rate) { _stats.sample_rate_accumulation += rate; _stats.sample_rate_count++; @@ -239,6 +228,9 @@ volatile bool *initialized_address() { return &_initialized; } private: + // Protects the traces currently sampled (below). + volatile intptr_t _stack_storage_lock[1]; + // The traces currently sampled. GrowableArray<StackTraceData> *_allocated_traces; @@ -288,9 +280,6 @@ int _size; }; - // Instance initialization. - void initialize_storage(int max_storage); - // Copies from StackTraceData to jvmtiStackTrace. 
bool deep_copy(jvmtiStackTrace *to, const StackTraceData *from); @@ -301,6 +290,8 @@ void store_garbage_trace(const StackTraceData &trace); void free_garbage(); + void free_storage(); + void allocate_storage(int max_gc_storage); }; StackTraceStorage* StackTraceStorage::internal_storage; @@ -320,7 +311,7 @@ _frequent_garbage_traces(NULL), _max_gc_storage(0), _initialized(false) { - memset(&_stats, 0, sizeof(_stats)); + _stack_storage_lock[0] = 0; } void StackTraceStorage::free_garbage() { @@ -366,7 +357,7 @@ } } -StackTraceStorage::~StackTraceStorage() { +void StackTraceStorage::free_storage() { delete _allocated_traces; free_garbage(); @@ -375,7 +366,11 @@ _initialized = false; } -void StackTraceStorage::initialize_storage(int max_gc_storage) { +StackTraceStorage::~StackTraceStorage() { + free_storage(); +} + +void StackTraceStorage::allocate_storage(int max_gc_storage) { // In case multiple threads got locked and then 1 by 1 got through. if (_initialized) { return; @@ -392,6 +387,7 @@ } void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) { + MutexLocker mu(HeapMonitor_lock); StackTraceData new_data(trace, o); _stats.sample_count++; _stats.stack_depth_accumulation += trace->frame_count; @@ -400,8 +396,9 @@ size_t StackTraceStorage::weak_oops_do(BoolObjectClosure *is_alive, OopClosure *f) { + MutexLocker mu(HeapMonitor_lock); size_t count = 0; - if (is_initialized()) { + if (initialized()) { int len = _allocated_traces->length(); // Compact the oop traces. Moves the live oops to the beginning of the @@ -484,6 +481,7 @@ void StackTraceStorage::copy_stack_traces(const StackTraceDataCopier &copier, jvmtiStackTraces *traces) { + MutexLocker mu(HeapMonitor_lock); int len = copier.size(); // Create a new array to store the StackTraceData objects. @@ -592,9 +590,6 @@ _monitoring_rate = monitoring_rate; - // Initalize and reset. - StackTraceStorage::initialize_stack_trace_storage(max_gc_storage); - // Populate the lookup table for fast_log2. 
// This approximates the log2 curve with a step function. // Steps have height equal to log2 of the mid-point of the step. @@ -608,6 +603,8 @@ if (_rnd == 0) { _rnd = 1; } + + StackTraceStorage::storage()->initialize(max_gc_storage); _enabled = true; } @@ -654,7 +651,7 @@ void HeapMonitoring::object_alloc_do_sample(Thread *t, oopDesc *o, intx byte_size) { #if defined(X86) || defined(PPC) JavaThread *thread = static_cast<JavaThread *>(t); - if (StackTraceStorage::storage()->is_initialized()) { + if (StackTraceStorage::storage()->initialized()) { assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); JavaThread *thread = static_cast<JavaThread *>(t); @@ -699,12 +696,6 @@ // Failure! FREE_C_HEAP_ARRAY(jvmtiFrameInfo, trace->frames); FREE_C_HEAP_OBJ(trace); - return; - } else { - // There is something like 64K worth of allocation before the VM - // initializes. This is just in the interests of not slowing down - // startup. - assert(t->is_Java_thread(), "non-Java thread passed to do_sample"); } #else Unimplemented();