
src/hotspot/share/runtime/heapMonitoring.cpp

rev 47223 : [mq]: heapz8
rev 47224 : [mq]: heap9a
rev 47225 : [mq]: heap10

*** 27,47 ****
  #include "gc/shared/collectedHeap.hpp"
  #include "memory/universe.hpp"
  #include "runtime/heapMonitoring.hpp"
  #include "runtime/vframe.hpp"

! // DONE:
! //   merged printouts
! //   broke up the one-liner
! //   talk about synchro
! //   cleaned up old entry points for C1/interpreter
! //   add statistics per GC and log start up initialization.
! //   removed the null pointer check during the weak_oops_do walk
! //   cleaned up the task_executor
! //   fixed the compilation using the option --disable-precompiled-header
!
! static const int MaxStackDepth = 64;

  // Internal data structure representing traces.
  struct StackTraceData : CHeapObj<mtInternal> {
    jvmtiStackTrace *trace;
    oop obj;
--- 27,37 ----
  #include "gc/shared/collectedHeap.hpp"
  #include "memory/universe.hpp"
  #include "runtime/heapMonitoring.hpp"
  #include "runtime/vframe.hpp"

! const int MaxStackDepth = 1024;

  // Internal data structure representing traces.
  struct StackTraceData : CHeapObj<mtInternal> {
    jvmtiStackTrace *trace;
    oop obj;
*** 260,270 ****
    // Heap Sampling statistics.
    jvmtiHeapSamplingStats _stats;

    // Maximum amount of storage provided by the JVMTI call initialize_profiling.
!   int _max_storage;

    static StackTraceStorage* internal_storage;
    volatile bool _initialized;

    // Support functions and classes for copying data to the external
--- 250,260 ----
    // Heap Sampling statistics.
    jvmtiHeapSamplingStats _stats;

    // Maximum amount of storage provided by the JVMTI call initialize_profiling.
!   int _max_gc_storage;

    static StackTraceStorage* internal_storage;
    volatile bool _initialized;

    // Support functions and classes for copying data to the external
*** 326,336 ****
  StackTraceStorage::StackTraceStorage() :
    _allocated_traces(NULL),
    _recent_garbage_traces(NULL),
    _frequent_garbage_traces(NULL),
!   _max_storage(0),
    _initialized(false) {
    memset(&_stats, 0, sizeof(_stats));
  }

  void StackTraceStorage::free_garbage() {
--- 316,326 ----
  StackTraceStorage::StackTraceStorage() :
    _allocated_traces(NULL),
    _recent_garbage_traces(NULL),
    _frequent_garbage_traces(NULL),
!   _max_gc_storage(0),
    _initialized(false) {
    memset(&_stats, 0, sizeof(_stats));
  }

  void StackTraceStorage::free_garbage() {
*** 383,405 ****
    delete _recent_garbage_traces;
    delete _frequent_garbage_traces;
    _initialized = false;
  }

! void StackTraceStorage::initialize_storage(int max_storage) {
    // In case multiple threads got locked and then 1 by 1 got through.
    if (_initialized) {
      return;
    }

    _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
        GrowableArray<StackTraceData>(128, true);

!   _recent_garbage_traces = new MostRecentGarbageTraces(max_storage);
!   _frequent_garbage_traces = new FrequentGarbageTraces(max_storage);

!   _max_storage = max_storage;
    _initialized = true;
  }

  void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
    StackTraceData new_data(trace, o);
--- 373,395 ----
    delete _recent_garbage_traces;
    delete _frequent_garbage_traces;
    _initialized = false;
  }

! void StackTraceStorage::initialize_storage(int max_gc_storage) {
    // In case multiple threads got locked and then 1 by 1 got through.
    if (_initialized) {
      return;
    }

    _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
        GrowableArray<StackTraceData>(128, true);

!   _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
!   _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

!   _max_gc_storage = max_gc_storage;
    _initialized = true;
  }

  void StackTraceStorage::add_trace(jvmtiStackTrace *trace, oop o) {
    StackTraceData new_data(trace, o);
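The "multiple threads got locked and then 1 by 1 got through" comment describes a check-under-lock pattern: threads racing to enable monitoring all block on an outer lock, and every thread after the first must see _initialized set and back out without reallocating. A minimal standalone sketch of that scenario, using std::mutex as a stand-in for HotSpot's own Mutex/MutexLocker (all names below are illustrative, not the patch's code):

    #include <mutex>

    static std::mutex init_lock;      // stand-in for the caller-side lock
    static bool initialized = false;  // guarded by init_lock

    void initialize_once(int max_gc_storage) {
      std::lock_guard<std::mutex> guard(init_lock);
      if (initialized) {
        return;  // a late racer reached the lock after the winner finished
      }
      // ... allocate trace storage sized by max_gc_storage ...
      initialized = true;
    }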
*** 454,472 ****
                                           const StackTraceData *from) {
    const jvmtiStackTrace *src = from->trace;
    *to = *src;

    to->frames =
!       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, MaxStackDepth, mtInternal);

    if (to->frames == NULL) {
      return false;
    }

    memcpy(to->frames,
           src->frames,
!          sizeof(jvmtiFrameInfo) * MaxStackDepth);
    return true;
  }

  // Called by the outside world; returns a copy of the stack traces
  // (because we could be replacing them as the user handles them).
--- 444,462 ----
                                           const StackTraceData *from) {
    const jvmtiStackTrace *src = from->trace;
    *to = *src;

    to->frames =
!       NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);

    if (to->frames == NULL) {
      return false;
    }

    memcpy(to->frames,
           src->frames,
!          sizeof(jvmtiFrameInfo) * src->frame_count);
    return true;
  }

  // Called by the outside world; returns a copy of the stack traces
  // (because we could be replacing them as the user handles them).
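This hunk sizes the copy by src->frame_count rather than the MaxStackDepth cap: now that the cap is 1024, allocating and memcpy'ing 1024 jvmtiFrameInfo entries per trace would waste memory on every copy and, if source buffers are ever sized to the captured frame count, read past their end. A minimal sketch of the fixed pattern in isolation (the helper name is hypothetical):

    // Deep-copies a trace, sized by the frames actually captured.
    static bool deep_copy(jvmtiStackTrace *to, const jvmtiStackTrace *src) {
      *to = *src;
      to->frames = NEW_C_HEAP_ARRAY(jvmtiFrameInfo, src->frame_count, mtInternal);
      if (to->frames == NULL) {
        return false;  // allocation failed; nothing was copied
      }
      memcpy(to->frames, src->frames, sizeof(jvmtiFrameInfo) * src->frame_count);
      return true;
    }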
*** 591,610 ****
                                      OopClosure *f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
  }

! void HeapMonitoring::initialize_profiling(jint monitoring_rate, jint max_storage) {
    // Ignore if already enabled.
    if (_enabled) {
      return;
    }

    _monitoring_rate = monitoring_rate;

    // Initialize and reset.
!   StackTraceStorage::initialize_stack_trace_storage(max_storage);

    // Populate the lookup table for fast_log2.
    // This approximates the log2 curve with a step function.
    // Steps have height equal to log2 of the mid-point of the step.
    for (int i = 0; i < (1 << FastLogNumBits); i++) {
--- 581,601 ----
                                      OopClosure *f) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    return StackTraceStorage::storage()->weak_oops_do(is_alive, f);
  }

! void HeapMonitoring::initialize_profiling(jint monitoring_rate,
!                                           jint max_gc_storage) {
    // Ignore if already enabled.
    if (_enabled) {
      return;
    }

    _monitoring_rate = monitoring_rate;

    // Initialize and reset.
!   StackTraceStorage::initialize_stack_trace_storage(max_gc_storage);

    // Populate the lookup table for fast_log2.
    // This approximates the log2 curve with a step function.
    // Steps have height equal to log2 of the mid-point of the step.
    for (int i = 0; i < (1 << FastLogNumBits); i++) {
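The loop above, cut off by the hunk, fills the fast_log2 lookup table; the comments give the scheme (a step function whose steps take the height log2(mid-point)) but not the body. A self-contained sketch of that kind of table fill, where the value of FastLogNumBits and the exact arithmetic are assumptions rather than the patch's code:

    #include <math.h>

    static const int FastLogNumBits = 10;          // assumed table-size constant
    static double log_table[1 << FastLogNumBits];  // one entry per mantissa step

    static void populate_log_table() {
      for (int i = 0; i < (1 << FastLogNumBits); i++) {
        // Each step covers mantissas [i, i+1) / 2^FastLogNumBits; store the
        // log2 of the step's mid-point as that step's height.
        double midpoint = 1.0 + (i + 0.5) / static_cast<double>(1 << FastLogNumBits);
        log_table[i] = log(midpoint) / log(2.0);
      }
    }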