/*
 * Copyright (c) 2014, 2015, Dynatrace and/or its affiliates. All rights reserved.
 *
 * This file is part of the Lock Contention Tracing Subsystem for the HotSpot
 * Virtual Machine, which is developed at Christian Doppler Laboratory on
 * Monitoring and Evolution of Very-Large-Scale Software Systems. Please
 * contact us at if you need additional information
 * or have any questions.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work. If not, see <http://www.gnu.org/licenses/>.
 *
 */

#include "evtrace/traceManager.hpp"

#include "evtrace/traceBuffer.hpp"
#include "evtrace/traceBufferQueue.hpp"
#include "evtrace/traceReaderThread.hpp"
#include "evtrace/traceEvents.hpp"

#include "runtime/javaCalls.hpp"
#include "runtime/thread.hpp"

volatile bool      TraceManager::_is_initialized = false;
TraceReaderThread *TraceManager::_thread         = NULL;
Monitor           *TraceManager::_thread_mtx     = NULL;
volatile bool      TraceManager::_thread_running = false;
TraceBufferQueue  *TraceManager::_flush_queue    = NULL;
TraceBufferQueue  *TraceManager::_free_queue     = NULL;
TraceMetadata     *TraceManager::_metadata       = NULL;
bool               TraceManager::_classes_unloaded_in_current_safepoint = false;
DEBUG_ONLY(int TraceManager::_current_safepoint_counter);

// statistics
volatile intptr_t TraceManager::_buffer_count                = 0xdead;
volatile intptr_t TraceManager::_max_buffer_count            = 0xdead;
volatile intptr_t TraceManager::_allocated_buffer_count      = 0xdead;
volatile intptr_t TraceManager::_submitted_trace_bytes       = 0xdead;
intptr_t          TraceManager::_reclaimed_trace_bytes       = 0xdead;
volatile intptr_t TraceManager::_reclaim_time_nanos          = 0xdead;
intptr_t          TraceManager::_stored_free_enqueue_ops     = 0xdead;
intptr_t          TraceManager::_stored_free_dequeue_ops     = 0xdead;
intptr_t          TraceManager::_stored_flush_enqueue_ops    = 0xdead;
intptr_t          TraceManager::_stored_flush_dequeue_ops    = 0xdead;
volatile intptr_t TraceManager::_total_stack_traces          = 0xdead;
volatile intptr_t TraceManager::_truncated_stack_traces      = 0xdead;
volatile intptr_t TraceManager::_reused_memento_stack_traces = 0xdead;
volatile intptr_t TraceManager::_total_stack_frames          = 0xdead;
volatile intptr_t TraceManager::_reused_memento_stack_frames = 0xdead;

void TraceManager::initialize() {
  assert(!_is_initialized, "already initialized");
  assert(_flush_queue == NULL, "flush queue exists");
  assert(_free_queue == NULL, "free queue exists");
  assert(_thread == NULL, "thread exists");
  assert(!_thread_running, "thread running");
  assert(_metadata == NULL, "metadata exists");
  assert(!_classes_unloaded_in_current_safepoint, "must be unset");

  // nothing should be allocated before this point
  // NOTE: requires -XX:NativeMemoryTracking=summary or =detail
  assert(MallocMemorySummary::as_snapshot()->by_type(mtEventTracing)->malloc_size() == 0,
         "premature event tracing allocations");

  guarantee(EventTracingStackDepthLimit <= TRACE_STACK_MAX_FRAMES, "stack depth limit is too high");
  if (EventTracingStackDepthLimit == 0) {
    EnableEventTracingStackTraces = false;
  }

  TraceEvents::initialize();

  _metadata = new TraceMetadata();
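
  // Buffer life cycle in rough terms (a sketch of how the code below uses the
  // queues; see traceBuffer.hpp and traceBufferQueue.hpp for details): a thread
  // obtains a buffer, either freshly allocated or dequeued from _free_queue
  // when EnableEventTracingBufferReuse is enabled, fills it with events, and
  // the buffer is eventually enqueued on _flush_queue, from where the trace
  // reader thread dequeues and processes it.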
  _thread_mtx = new Monitor(Mutex::nonleaf + 1, "TraceManager::_thread_mtx");

  _buffer_count = 0;

  _flush_queue = new TraceBufferQueue();
  if (EnableEventTracingBufferReuse) {
    _free_queue = new TraceBufferQueue();
    for (uintx i = 0; i < EventTracingPreallocatedBuffers; i++) {
      _free_queue->enqueue(allocate_buffer());
    }
    assert((jlong) _free_queue->count() == (jlong) EventTracingPreallocatedBuffers, "sanity");
  }

  _is_initialized = true;

  reset_stats();
}

Handle TraceManager::create_queue_object(TraceBufferQueue *q, instanceKlassHandle klass, TRAPS) {
  JavaValue result(T_OBJECT);
  result.set_jobject(NULL);

  if (q != NULL) {
    JavaCallArguments args;
    args.push_long((jlong) q);
    JavaCalls::call_static(&result,
                           klass,
                           vmSymbols::TraceBufferQueue_from_handle_method_name(),
                           vmSymbols::TraceBufferQueue_from_handle_method_signature(),
                           &args,
                           CHECK_NH);
  }

  return Handle(THREAD, (oop) result.get_jobject());
}

void TraceManager::start_threads(TRAPS) {
  assert(_thread == NULL && !_thread_running, "trace reader thread exists");

  Klass* qk = SystemDictionary::resolve_or_fail(vmSymbols::sun_evtracing_TraceBufferQueue(), true, CHECK);
  instanceKlassHandle q_klass(THREAD, qk);
  q_klass->initialize(CHECK);

  Handle flushq = create_queue_object(_flush_queue, q_klass, CHECK);
  Handle freeq = create_queue_object(_free_queue, q_klass, CHECK);

  _thread = TraceReaderThread::start(flushq(), freeq(), THREAD);
  _thread_running = true;
}

class TraceManager::ReclaimTraceBuffersClosure : public ThreadClosure {
  bool  _deinitialize;
  jlong _reclaimed_bytes;

public:
  ReclaimTraceBuffersClosure(bool deinit) : _deinitialize(deinit), _reclaimed_bytes(0) { }
  ~ReclaimTraceBuffersClosure() { }

  virtual void do_thread(Thread *t) {
    assert(t != NULL, "null thread");

    TraceBuffer *buffer = t->trace_buffer();
    if (buffer != NULL && (_deinitialize || buffer->filled_size() > 0)) {
      TraceManager::pre_submit_buffer(buffer);
      _reclaimed_bytes += buffer->filled_size();
      TraceManager::_flush_queue->enqueue(buffer);

      buffer = NULL;
      if (!_deinitialize && EnableEventTracingBufferReuse) {
        buffer = TraceManager::_free_queue->try_dequeue();
        if (buffer != NULL) {
          buffer->owner = t;
        }
      }
      t->set_trace_buffer(buffer);
    }
  }

  jlong reclaimed_bytes() { return _reclaimed_bytes; }
};

class TraceManager::VM_ReclaimTraceBuffers : public VM_Operation {
  bool  _deinitialize;
  jlong _queue_fill_mark;

public:
  VM_ReclaimTraceBuffers(bool deinit) : _deinitialize(deinit) {}
  VMOp_Type type() const { return VMOp_ReclaimTraceBuffers; }
  void doit();
  jlong queue_fill_mark() { return _queue_fill_mark; }
};

void TraceManager::VM_ReclaimTraceBuffers::doit() {
  assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sanity");

  if (_deinitialize) {
    TraceEvents::write_vm_end();

    // stop threads from acquiring buffers
    TraceManager::_is_initialized = false;
  }

  TraceManager::ReclaimTraceBuffersClosure cl(_deinitialize);
  Threads::threads_do(&cl);

  _queue_fill_mark = TraceManager::_flush_queue->fill_mark();

  TraceManager::_reclaimed_trace_bytes += cl.reclaimed_bytes();

  if (_thread != NULL && _thread->is_polling_queue()) {
    // reader thread is sleeping in poll loop, interrupt
    // we don't want to interrupt the thread elsewhere, for example during
    // NIO channel operations, when an interrupt shuts down the channel
    Thread::interrupt(_thread);
  }
}

void TraceManager::reclaim_buffers_in_safepoint(bool wait_until_processed) {
  assert_initialized();

  jlong reclaim_start = os::javaTimeNanos();

  jlong fill_mark = do_reclaim_buffers_in_safepoint(false);

  if (wait_until_processed) {
    // NOTE: this doesn't actually wait until the last buffer has been
    //       *processed*, only until the reader thread has *dequeued*
    //       the last reclaimed buffer (usually when the second-to-last
    //       buffer has been processed)
    while (!_flush_queue->has_dequeued_at_mark(fill_mark)) {
      os::sleep(Thread::current(), 1, true);
    }
  }

  if (EnableEventTracingDiagnostics) {
    // FIXME: counter value depends on whether callers set wait_until_processed
    jlong duration = os::javaTimeNanos() - reclaim_start;
    Atomic::add_ptr(duration, &_reclaim_time_nanos);
  }
}

jlong TraceManager::do_reclaim_buffers_in_safepoint(bool deinit) {
  VM_ReclaimTraceBuffers op(deinit);
  VMThread::execute(&op);
  return op.queue_fill_mark();
}

class TraceManager::VM_ResetTraceMetadata : public VM_Operation {
public:
  VM_ResetTraceMetadata() {}
  VMOp_Type type() const { return VMOp_ResetTraceMetadata; }
  void doit();
  bool allow_nested_vm_operations() const { return true; }
};

void TraceManager::VM_ResetTraceMetadata::doit() {
  assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sanity");

  VM_ReclaimTraceBuffers reclaim_op(false);
  VMThread::execute(&reclaim_op);

  if (_thread != NULL && _thread->is_polling_queue() && _thread->trace_buffer() != NULL) {
    // The ReclaimTraceBuffers VM op has interrupted the reader thread, which
    // generated a ThreadInterrupt event containing a sequence number.
    // Therefore, this event must be submitted before the metadata reset event.
    submit_buffer(Thread::current()->trace_buffer());
    Thread::current()->set_trace_buffer(NULL);
  }

  TraceManager::_metadata->purge_all();
  TraceEvents::write_metadata_reset();

  if (Thread::current()->trace_buffer() != NULL) {
    submit_buffer(Thread::current()->trace_buffer());
    Thread::current()->set_trace_buffer(NULL);
  }
}

void TraceManager::reset_metadata() {
  VM_ResetTraceMetadata op;
  VMThread::execute(&op);
}

void TraceManager::nmethod_is_unloading(const nmethod *nm) {
  if (_is_initialized) {
    _metadata->purge_unloading_nmethod(nm);
  }
}

void TraceManager::class_loader_is_unloading(ClassLoaderData *loader) {
  assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sanity");

  if (!_is_initialized) return;

  // When classes and methods are unloaded during garbage collection, their
  // memory is reclaimed and other classes and methods can be loaded in their
  // place. Since we use addresses as unique identifiers, we need to observe
  // such unload events and purge any references to unloaded metadata from
  // our records.
  //
  // Trace analysis must also be aware of unloads, since any trace events
  // after an unload can use the same identifiers to refer to different data.
  // Therefore, we reclaim the trace buffers from all threads at this point.
  // We then write one or more unload events to the trace buffer of the VM
  // thread, and finally enqueue that buffer before the end of the safepoint.
  // Therefore, when processing the buffers in the order in which they are
  // enqueued, trace analysis encounters the necessary unload events before
  // any events after the unload.
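  //
  // As a sketch of the intended ordering (illustrative, not taken from an
  // actual trace), a safepoint that unloads class C places buffers on the
  // flush queue as follows:
  //   1. the reclaimed, partially filled buffers of all threads, whose events
  //      still refer to C under its old identifier,
  //   2. the VM thread's buffer containing the class loader unload event(s),
  //      enqueued before the safepoint ends (see do_work_before_safepoint_end()),
  // so the analysis encounters the unload before any later event that may
  // reuse C's identifier.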
  if (!_classes_unloaded_in_current_safepoint) {
    // G1 doesn't allow nested VM ops
    VM_ReclaimTraceBuffers(false).doit();

    _classes_unloaded_in_current_safepoint = true;
    DEBUG_ONLY(_current_safepoint_counter = SafepointSynchronize::_safepoint_counter);
  } else {
    assert(_current_safepoint_counter == SafepointSynchronize::_safepoint_counter, "different safepoint");
  }

  TraceEvents::write_class_loader_unload(loader);

  _metadata->purge_unloading_classes(loader);
}

void TraceManager::do_work_before_safepoint_end() {
  assert(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread(), "sanity");

  if (_is_initialized) {
    VM_ReclaimTraceBuffers(false).doit(); // don't bother with VM op

    if (_classes_unloaded_in_current_safepoint) {
      assert(_current_safepoint_counter == SafepointSynchronize::_safepoint_counter, "different safepoint");
      _classes_unloaded_in_current_safepoint = false;
    }

    _metadata->do_maintenance();
  }
}

void TraceManager::thread_is_exiting(TraceReaderThread* t) {
  guarantee(!_is_initialized, "thread exiting prematurely");
  guarantee(t == _thread, "unexpected thread");
  guarantee(_thread_running, "not running?");

  MonitorLockerEx ml(_thread_mtx);
  _thread_running = false;
  ml.notify_all();
}

void TraceManager::finish_and_destroy(TRAPS) {
  assert_initialized();
  assert(_thread != NULL, "thread not initialized");

  jlong reclaim_start = os::javaTimeNanos();

  // This only sets a flag; the reader thread checks it once queue
  // operations start to fail after the reclaim op below
  _thread->shutdown(CHECK);

  _thread_mtx->lock();
  do_reclaim_buffers_in_safepoint(true);
  while (_thread_running) {
    _thread_mtx->wait(); // wait for trace reader thread to call back
  }
  _thread_mtx->unlock();
  delete _thread_mtx;
  _thread_mtx = NULL;
  _thread = NULL; // NOTE: thread deletes itself on exit

  if (EnableEventTracingDiagnostics) {
    _reclaim_time_nanos += (os::javaTimeNanos() - reclaim_start);
  }

  // We keep these around to provide statistics to agents after deinitialization (during shutdown)
  // FIXME: we should fill a single TraceStatistics object instead of all those variables
  if (EnableEventTracingBufferReuse) {
    _stored_free_enqueue_ops = _free_queue->enqueue_ops() - _stored_free_enqueue_ops;
    _stored_free_dequeue_ops = _free_queue->dequeue_ops() - _stored_free_dequeue_ops;
  }
  _stored_flush_enqueue_ops = _flush_queue->enqueue_ops() - _stored_flush_enqueue_ops;
  _stored_flush_dequeue_ops = _flush_queue->dequeue_ops() - _stored_flush_dequeue_ops;

  assert(_flush_queue->is_empty(), "flush queue not empty");
  delete _flush_queue;
  _flush_queue = NULL;

  if (EnableEventTracingBufferReuse) {
    while (!_free_queue->is_empty()) {
      TraceBuffer *buffer = _free_queue->try_dequeue();
      assert(buffer != NULL, "null buffer from non-empty queue");
      free_buffer(buffer);
    }
    delete _free_queue;
    _free_queue = NULL;
  }
  assert(_free_queue == NULL, "sanity");

  delete _metadata;
  _metadata = NULL;

  // all event tracing memory must be deallocated here
  // NOTE: requires -XX:NativeMemoryTracking=summary or =detail
  assert(MallocMemorySummary::as_snapshot()->by_type(mtEventTracing)->malloc_size() == 0, "memory leak");
}

void TraceManager::write_stats(TraceStatistics *stats) {
  assert(!_is_initialized || _flush_queue != NULL, "sanity");

  if (EnableEventTracingBufferReuse) {
    jlong free_enqueue_ops, free_dequeue_ops;
    free_enqueue_ops = _is_initialized ? _free_queue->enqueue_ops() - _stored_free_enqueue_ops
                                       : _stored_free_enqueue_ops;
    free_dequeue_ops = _is_initialized ? _free_queue->dequeue_ops() - _stored_free_dequeue_ops
                                       : _stored_free_dequeue_ops;
    stats->add_entry("free_enqueue_ops", free_enqueue_ops);
    stats->add_entry("free_dequeue_ops", free_dequeue_ops);
  }

  jlong flush_enqueue_ops, flush_dequeue_ops;
  flush_enqueue_ops = _is_initialized ? _flush_queue->enqueue_ops() - _stored_flush_enqueue_ops
                                      : _stored_flush_enqueue_ops;
  flush_dequeue_ops = _is_initialized ? _flush_queue->dequeue_ops() - _stored_flush_dequeue_ops
                                      : _stored_flush_dequeue_ops;
  stats->add_entry("flush_enqueue_ops", flush_enqueue_ops);
  stats->add_entry("flush_dequeue_ops", flush_dequeue_ops);

  stats->add_entry("mean_buffer_capacity", EventTracingBufferCapacity);
  stats->add_entry("reclaimed_trace_bytes", _reclaimed_trace_bytes);

  if (EnableEventTracingDiagnostics) {
    stats->add_entry("submitted_trace_bytes", _submitted_trace_bytes);
    stats->add_entry("reclaim_time_nanos", _reclaim_time_nanos);
    stats->add_entry("stack_cache_lookups", _metadata->stack_cache_lookups());
    stats->add_entry("stack_cache_lookup_misses", _metadata->stack_cache_lookup_misses());
    stats->add_entry("stack_cache_lookup_collisions", _metadata->stack_cache_lookup_collisions());
    stats->add_entry("stack_cache_probes", _metadata->stack_cache_probes());
    stats->add_entry("stack_cache_probe_collisions", _metadata->stack_cache_probe_collisions());
    stats->add_entry("stack_cache_purge_millis", _metadata->stack_cache_purge_millis());
    stats->add_entry("stack_cache_maintenance_millis", _metadata->stack_cache_maintenance_millis());
    stats->add_entry("total_stack_traces", _total_stack_traces);
    stats->add_entry("truncated_stack_traces", _truncated_stack_traces);
    stats->add_entry("reused_memento_stack_traces", _reused_memento_stack_traces);
    stats->add_entry("total_stack_frames", _total_stack_frames);
    stats->add_entry("reused_memento_stack_frames", _reused_memento_stack_frames);
    stats->add_entry("buffer_count", _buffer_count);
    stats->add_entry("max_buffer_count", _max_buffer_count);
    stats->add_entry("allocated_buffer_count", _allocated_buffer_count);
  }
}

void TraceManager::reset_stats() {
  assert(!_is_initialized || _flush_queue != NULL, "sanity");

  if (_is_initialized) {
    if (EnableEventTracingBufferReuse) {
      _stored_free_enqueue_ops = _free_queue->enqueue_ops();
      _stored_free_dequeue_ops = _free_queue->dequeue_ops();
    }
    _stored_flush_enqueue_ops = _flush_queue->enqueue_ops();
    _stored_flush_dequeue_ops = _flush_queue->dequeue_ops();
  }

  _reclaimed_trace_bytes = 0;

  if (EnableEventTracingDiagnostics) {
    _submitted_trace_bytes = 0;
    _reclaim_time_nanos = 0;
    _total_stack_traces = 0;
    _truncated_stack_traces = 0;
    _reused_memento_stack_traces = 0;
    _total_stack_frames = 0;
    _reused_memento_stack_frames = 0;
    _max_buffer_count = _buffer_count;
    _allocated_buffer_count = 0;
    _metadata->reset_stack_cache_stats();
  }
}
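
// A minimal usage sketch of the statistics interface above (hypothetical
// caller, e.g. an agent requesting statistics; assumes TraceStatistics can be
// constructed by the caller and merely collects the name/value pairs passed
// to add_entry()):
//
//   TraceStatistics stats;
//   TraceManager::write_stats(&stats); // counters accumulated since the last reset
//   TraceManager::reset_stats();       // begin a new measurement interval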