
src/hotspot/share/runtime/heapMonitoring.cpp

rev 48551 : [mq]: heap8
rev 48552 : [mq]: heap10a
rev 48553 : [mq]: heap14_rebased
rev 48555 : [mq]: heap16
rev 48556 : [mq]: heap17
rev 48557 : [mq]: heap17
rev 48558 : [mq]: heap19
rev 48559 : [mq]: heap20
rev 48560 : [mq]: heap21
rev 48562 : [mq]: heap23
rev 48564 : [mq]: update-spec
rev 48565 : [mq]: event

*** 1,7 ****
  /*
! * Copyright (c) 2017, Google and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
--- 1,7 ----
  /*
! * Copyright (c) 2018, Google and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
*** 219,229 ****
  // Each object that we profile is stored as trace with the thread_id.
  class StackTraceStorage : public CHeapObj<mtInternal> {
   public:

    // The function that gets called to add a trace to the list of
    // traces we are maintaining.
!   void add_trace(jvmtiAllocTraceInfo* trace, oop o);

    // The function that gets called by the client to retrieve the list
    // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
    void get_live_alloc_stack_traces(JvmtiEnv* env, jvmtiAllocTraceInfo** traces,
--- 219,230 ----
  // Each object that we profile is stored as trace with the thread_id.
  class StackTraceStorage : public CHeapObj<mtInternal> {
   public:

    // The function that gets called to add a trace to the list of
    // traces we are maintaining.
!   // Returns if the trace got added or not.
!   bool add_trace(jvmtiAllocTraceInfo* trace, oop o);

    // The function that gets called by the client to retrieve the list
    // of stack traces. Passes a jvmtiAllocTraceInfo which will get mutated.
    void get_live_alloc_stack_traces(JvmtiEnv* env, jvmtiAllocTraceInfo** traces,
*** 278,292 ****
      MutexLocker mu(HeapMonitorStorage_lock);
      _stats.sample_rate_accumulation += rate;
      _stats.sample_rate_count++;
    }

-   bool initialized() {
-     return OrderAccess::load_acquire(&_initialized) != 0;
-     return _initialized;
-   }
-
   private:
    // The traces currently sampled.
    GrowableArray<StackTraceDataWithOop>* _allocated_traces;

    // The traces currently sampled.
--- 279,288 ----
*** 303,313 ****
    // Maximum amount of storage provided by the JVMTI call initialize_profiling.
    int _max_gc_storage;

    static StackTraceStorage* internal_storage;
!   int _initialized;

    // Support functions and classes for copying data to the external
    // world.
    class StackTraceDataCopier {
     public:
--- 299,309 ----
    // Maximum amount of storage provided by the JVMTI call initialize_profiling.
    int _max_gc_storage;

    static StackTraceStorage* internal_storage;
!   bool _initialized;

    // Support functions and classes for copying data to the external
    // world.
    class StackTraceDataCopier {
     public:
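A note on this change: with the lock-free initialized() accessor removed in the previous hunk, _initialized no longer needs OrderAccess load_acquire/release_store semantics; the remaining readers and writers all hold HeapMonitorStorage_lock (or run in the VM thread at a safepoint), so a plain bool suffices. A minimal standalone sketch of that idea, using std::mutex to stand in for HeapMonitorStorage_lock and hypothetical names:

#include <mutex>

// Hypothetical model: the mutex that guards every access to the flag also
// provides the memory ordering that load_acquire/release_store used to
// supply, so the flag itself can be a plain bool.
struct StorageFlagModel {
  std::mutex lock;            // stands in for HeapMonitorStorage_lock
  bool initialized = false;

  void set(bool value) {
    std::lock_guard<std::mutex> guard(lock);
    initialized = value;
  }

  bool get() {
    std::lock_guard<std::mutex> guard(lock);
    return initialized;
  }
};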
*** 390,400 ****
    _allocated_traces = NULL;
    _traces_on_last_full_gc = NULL;
    _recent_garbage_traces = NULL;
    _frequent_garbage_traces = NULL;
    _max_gc_storage = 0;
!   OrderAccess::release_store(&_initialized, 0);
  }

  void StackTraceStorage::free_garbage() {
    StackTraceData** recent_garbage = NULL;
    uint32_t recent_size = 0;
--- 386,396 ----
    _allocated_traces = NULL;
    _traces_on_last_full_gc = NULL;
    _recent_garbage_traces = NULL;
    _frequent_garbage_traces = NULL;
    _max_gc_storage = 0;
!   _initialized = false;
  }

  void StackTraceStorage::free_garbage() {
    StackTraceData** recent_garbage = NULL;
    uint32_t recent_size = 0;
*** 423,433 ****
      StackTraceData::unreference_and_free(frequent_garbage[i]);
    }
  }

  void StackTraceStorage::free_storage() {
!   if (!initialized()) {
      return;
    }

    delete _allocated_traces;
    delete _traces_on_last_full_gc;
--- 419,429 ----
      StackTraceData::unreference_and_free(frequent_garbage[i]);
    }
  }

  void StackTraceStorage::free_storage() {
!   if (!_initialized) {
      return;
    }

    delete _allocated_traces;
    delete _traces_on_last_full_gc;
*** 438,458 ****
    reset();
  }

  StackTraceStorage::~StackTraceStorage() {
-   MutexLocker mu(HeapMonitorStorage_lock);
    free_storage();
  }

  void StackTraceStorage::allocate_storage(int max_gc_storage) {
    assert(HeapMonitorStorage_lock->owned_by_self()
           || (SafepointSynchronize::is_at_safepoint() &&
               Thread::current()->is_VM_thread()),
           "This should not be accessed concurrently");

    // In case multiple threads got locked and then 1 by 1 got through.
!   if (initialized()) {
      return;
    }

    _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
        GrowableArray<StackTraceDataWithOop>(128, true);
--- 434,453 ----
    reset();
  }

  StackTraceStorage::~StackTraceStorage() {
    free_storage();
  }

  void StackTraceStorage::allocate_storage(int max_gc_storage) {
    assert(HeapMonitorStorage_lock->owned_by_self()
           || (SafepointSynchronize::is_at_safepoint() &&
               Thread::current()->is_VM_thread()),
           "This should not be accessed concurrently");

    // In case multiple threads got locked and then 1 by 1 got through.
!   if (_initialized) {
      return;
    }

    _allocated_traces = new (ResourceObj::C_HEAP, mtInternal)
        GrowableArray<StackTraceDataWithOop>(128, true);
*** 462,491 ****
    _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
    _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

    _max_gc_storage = max_gc_storage;
    memset(&_stats, 0, sizeof(_stats));
!   OrderAccess::release_store(&_initialized, 1);
  }

! void StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
    MutexLocker mu(HeapMonitorStorage_lock);
!   // Last minute check on initialization here in case:
!   // Between the moment object_alloc_do_sample's check for initialization
!   // and now, there was a stop() that deleted the data.
!   if (initialized()) {
      StackTraceDataWithOop new_data(trace, o);
      _stats.sample_count++;
      _stats.stack_depth_accumulation += trace->stack_info->frame_count;
      _allocated_traces->append(new_data);
!   }
  }

  void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                       OopClosure* f) {
    size_t count = 0;
!   if (initialized()) {
      int len = _allocated_traces->length();

      _traces_on_last_full_gc->clear();

      // Compact the oop traces. Moves the live oops to the beginning of the
--- 457,497 ----
    _recent_garbage_traces = new MostRecentGarbageTraces(max_gc_storage);
    _frequent_garbage_traces = new FrequentGarbageTraces(max_gc_storage);

    _max_gc_storage = max_gc_storage;
    memset(&_stats, 0, sizeof(_stats));
!   _initialized = true;
  }

! bool StackTraceStorage::add_trace(jvmtiAllocTraceInfo* trace, oop o) {
    MutexLocker mu(HeapMonitorStorage_lock);
!   // Last minute check on initialization here in case the system got turned off
!   // and a few sample requests got through to here, ie:
!   // A sample point happened in TLAB, that code checks for
!   // HeapMonitoring::enabled and calls object_alloc_do_sample.
!   // The code starts getting a stacktrace and then calls this add_trace.
!   //
!   // At the same time, another thread has turned off HeapMonitoring while the
!   // stacktraces were getting constructed and disables StackTraceStorage.
!   //
!   // Both disabling and this add_trace are protected by the same
!   // HeapMonitorStorage_lock mutex.
!   if (!_initialized) {
!     return false;
!   }
!
!   StackTraceDataWithOop new_data(trace, o);
    _stats.sample_count++;
    _stats.stack_depth_accumulation += trace->stack_info->frame_count;
    _allocated_traces->append(new_data);
!   return true;
  }

  void StackTraceStorage::weak_oops_do(BoolObjectClosure* is_alive,
                                       OopClosure* f) {
    size_t count = 0;
!   if (_initialized) {
      int len = _allocated_traces->length();

      _traces_on_last_full_gc->clear();

      // Compact the oop traces. Moves the live oops to the beginning of the
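The comment block added in this hunk describes the race being closed: a sampling thread can pass the HeapMonitoring::enabled check, spend time building a stack trace, and only then reach add_trace, while another thread turns monitoring off in between. Because the disable path and add_trace take the same HeapMonitorStorage_lock, add_trace can simply re-check _initialized under the lock and report failure to the caller. A simplified standalone model of that pattern (hypothetical names, std::mutex in place of HeapMonitorStorage_lock, int in place of the real trace type):

#include <mutex>
#include <vector>

// Simplified model of the add_trace vs. disable race.
class SamplerModel {
 public:
  // Called by the sampling thread after the (slow) stack walk.
  bool add_trace(int trace) {
    std::lock_guard<std::mutex> guard(_lock);
    if (!_initialized) {
      return false;             // monitoring was turned off in the meantime
    }
    _traces.push_back(trace);
    return true;
  }

  // Called by the thread that turns heap monitoring off.
  void disable() {
    std::lock_guard<std::mutex> guard(_lock);
    _initialized = false;
    _traces.clear();            // safe: no add_trace can be in flight here
  }

 private:
  std::mutex _lock;             // models HeapMonitorStorage_lock
  bool _initialized = true;
  std::vector<int> _traces;
};

Holding one lock across both the check and the disable path is what makes the late re-check sufficient; no additional atomics are needed.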
*** 847,859 ****
    StackTraceStorage::storage()->accumulate_sample_rate(rate);
  }

  void HeapMonitoring::object_alloc_do_sample(Thread* t, oopDesc* o, size_t byte_size) {
    JavaThread* thread = static_cast<JavaThread*>(t);
-   if (StackTraceStorage::storage()->initialized()) {
      assert(t->is_Java_thread(), "non-Java thread passed to do_sample");
-     JavaThread* thread = static_cast<JavaThread*>(t);

      jvmtiAllocTraceInfo* trace = NEW_C_HEAP_OBJ(jvmtiAllocTraceInfo, mtInternal);
      if (trace == NULL) {
        return;
      }
--- 853,863 ----
*** 892,907 ****
        }
        stack_info->frame_count = count;
      }

      if (stack_info->frame_count > 0) {
!       // Success!
!       StackTraceStorage::storage()->add_trace(trace, o);
        return;
      }

      // Failure!
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
      FREE_C_HEAP_OBJ(stack_info);
      FREE_C_HEAP_OBJ(trace);
-   }
  }
--- 896,910 ----
        }
        stack_info->frame_count = count;
      }

      if (stack_info->frame_count > 0) {
!       if (StackTraceStorage::storage()->add_trace(trace, o)) {
        return;
      }
+     }

      // Failure!
      FREE_C_HEAP_ARRAY(jvmtiFrameInfo, frames);
      FREE_C_HEAP_OBJ(stack_info);
      FREE_C_HEAP_OBJ(trace);
  }
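With add_trace now reporting whether the trace was stored, the caller retains ownership of the trace buffers on failure and releases them on the shared failure path, rather than leaking them when storage was disabled between sampling and insertion. A rough standalone illustration of that ownership contract (hypothetical types, plain new/delete standing in for NEW_C_HEAP_OBJ/FREE_C_HEAP_OBJ):

// Hypothetical, simplified model of the ownership contract.
struct TraceInfoModel {
  int frame_count = 0;
};

// Stand-in for StackTraceStorage::add_trace: returns false when monitoring
// is off and the trace was not stored; on success it takes ownership.
static bool storage_add_trace(TraceInfoModel* trace, bool monitoring_on) {
  if (!monitoring_on) {
    return false;             // storage is disabled; caller keeps ownership
  }
  // Real code appends the trace under HeapMonitorStorage_lock and frees it
  // later; the model just takes ownership and releases it immediately.
  delete trace;
  return true;
}

// Stand-in for the tail of object_alloc_do_sample: free on any failure path.
static void do_sample_model(bool monitoring_on) {
  TraceInfoModel* trace = new TraceInfoModel();
  trace->frame_count = 3;     // pretend a stack was collected
  if (trace->frame_count > 0) {
    if (storage_add_trace(trace, monitoring_on)) {
      return;                 // storage took ownership of 'trace'
    }
  }
  // Failure: storage did not take ownership, so the sampler frees it here,
  // mirroring the FREE_C_HEAP_* calls above.
  delete trace;
}

int main() {
  do_sample_model(true);      // trace handed off to storage
  do_sample_model(false);     // trace freed by the sampler
  return 0;
}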