--- /dev/null 2016-10-25 08:46:44.038854975 +0200
+++ new/src/share/vm/evtrace/traceEvents.hpp 2016-10-25 10:40:13.978780704 +0200
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2014, 2015, Dynatrace and/or its affiliates. All rights reserved.
+ *
+ * This file is part of the Lock Contention Tracing Subsystem for the HotSpot
+ * Virtual Machine, which is developed at Christian Doppler Laboratory on
+ * Monitoring and Evolution of Very-Large-Scale Software Systems. Please
+ * contact us if you need additional information
+ * or have any questions.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef SHARE_VM_EVTRACE_TRACEEVENTS_HPP
+#define SHARE_VM_EVTRACE_TRACEEVENTS_HPP
+
+#include "evtrace/traceManager.hpp"
+#include "evtrace/traceMetadata.hpp"
+
+#include "memory/allocation.hpp"
+#include "runtime/objectMonitor.hpp"
+#include "runtime/thread.hpp"
+
+class TraceWriter;
+class Klass;
+class ClassLoaderData;
+
+// Static facade for emitting event-trace records; not instantiable (private
+// constructor, all members static). Privately inherits TraceTypes solely to
+// bring its type aliases (seq_num, stack_id, timestamp, ...) into scope.
+class TraceEvents: private TraceTypes // for mere convenience
+{
+public:
+ static void initialize();
+
+ // Thread lifecycle and state-change events.
+ static void write_thread_start();
+ static void write_thread_name_change(Thread *t);
+ static void write_thread_state_change(Thread *t);
+ static void write_thread_interrupt(Thread *t);
+ static void write_thread_exit();
+
+ // Park/unpark events; seq/unpark_seq/chained_seq link related records.
+ static void write_thread_park_begin(JavaThread *t, bool is_absolute, timestamp park_time);
+ static void write_thread_park_end(Thread *t, seq_num seq, seq_num unpark_seq, park_return_code return_code);
+ static void write_thread_unpark(thread_id thread, seq_num seq, seq_num chained_seq);
+
+ // Monitor (inflated lock) events.
+ static void write_monitor_inflate(ObjectMonitor *m, seq_num seq);
+ static void write_monitor_deflate(ObjectMonitor *m);
+ static void write_monitor_contended_enter(ObjectMonitor *m, monitor_enter_wait wait);
+ static void write_monitor_contended_entered(ObjectMonitor *m, monitor_entered_flags flags);
+ static void write_monitor_contended_exited(ObjectMonitor *m, seq_num seq, stack_id preallocated_stack_id, bool resolve_stack);
+ // Consumes an eagerly acquired sequence number without reporting a real
+ // event (see TraceEventMonitorContendedExited below).
+ static void write_monitor_dummy(ObjectMonitor *m, seq_num seq);
+
+ static void write_class_loader_unload(ClassLoaderData *cld);
+
+ // Safepoint bracketing events.
+ static void write_safepoint_begin(safepoint_reason reason);
+ static void write_safepoint_end(u4 vmops_processed);
+
+ static void write_vm_end();
+
+ static void write_metadata_reset();
+
+ static void write_group(JavaThread* t, seq_num park_seq_begin_ref, oop source);
+
+ // Writes a free-form marker record with the given label.
+ static void write_marker(const char *label);
+
+private:
+ TraceEvents() { } // static-only class
+
+ static bool can_write();
+
+ // Return an existing id, or assign one and write the matching metadata
+ // record on first encounter.
+ static class_id retrieve_class_id_or_write_metadata(Klass *k);
+ static method_id retrieve_method_id_or_write_metadata(Method *m);
+ static stack_id retrieve_stack_id_or_write_metadata(JavaThread *t, stack_id preallocated_id = 0);
+ static void write_stack_metadata(stack_id id, const CompositeTraceStack &ts);
+ static void write_identical_stacks_metadata(stack_id id, stack_id known);
+
+ // Small id/timestamp lookup helpers.
+ static timestamp time_now();
+ static thread_id thread_id_for(Thread *t);
+ static object_id object_id_for(oop obj);
+ static objmonitor_id objmonitor_id_for(ObjectMonitor *om);
+ static object_id objmonitor_object_id_for(ObjectMonitor *om);
+ static classloader_id classloader_id_for(ClassLoaderData *cld);
+ static method_id method_id_for(Method *m);
+};
+
+// Scoped (stack-allocated) helper for reporting a thread-park-end event.
+// Protocol: construct before leaving the parked state, call fill() exactly
+// once with the outcome, and the destructor writes the event if enabled.
+// The No_Safepoint_Verifier guards the window between fill() and the write.
+class TraceEventThreadParkEnd: public StackObj {
+private:
+ No_Safepoint_Verifier _nsv;
+ bool _enabled;
+ bool _filled;
+ Thread *_thread;
+ TraceTypes::seq_num _seq;
+ TraceTypes::seq_num _unpark_seq;
+ TraceTypes::park_return_code _return_code;
+
+ void do_write();
+
+public:
+ TraceEventThreadParkEnd(Thread *t)
+ // NOTE: initializers listed in declaration order (_filled before _thread);
+ // members are always initialized in declaration order, and the previous
+ // ordering triggered -Wreorder.
+ : _nsv(false, false), _filled(false), _thread(t)
+ {
+ assert(t != NULL, "null thread");
+ _enabled = EnableEventTracing && EnableEventTracingParkEvents && t->park_priority() >= 0;
+ }
+
+ // Records the park outcome; must be called exactly once before destruction.
+ void fill(TraceTypes::seq_num unpark_seq, TraceTypes::park_return_code return_code) {
+ assert(!_filled, "already filled");
+ _nsv.enable();
+ // Re-check: tracing may have been torn down since construction.
+ _enabled = _enabled && TraceManager::is_initialized();
+ if (_enabled) {
+ _seq = TraceManager::metadata()->next_global_seq();
+ _thread->set_park_last_global_seq(_seq);
+ _unpark_seq = unpark_seq;
+ _return_code = return_code;
+ }
+ _filled = true;
+ }
+
+ ~TraceEventThreadParkEnd() {
+ assert(_filled, "must have been filled");
+ if (_enabled) {
+ do_write();
+ }
+ }
+};
+
+// Scoped helper for reporting a thread-unpark event. The sequence number is
+// acquired eagerly at construction so callers can chain it (see seq() and
+// set_chained_seq()); the event itself is written by the destructor.
+class TraceEventThreadUnpark: public StackObj {
+private:
+ No_Safepoint_Verifier _nsv;
+ bool _enabled;
+ TraceTypes::thread_id _thread_id;
+ TraceTypes::seq_num _seq;
+ TraceTypes::seq_num _chained_seq;
+
+ void do_write();
+
+public:
+ TraceEventThreadUnpark(Thread *t) : _nsv(true, false), _chained_seq(-1) {
+ assert(t != NULL, "null thread");
+ _enabled = EnableEventTracing && EnableEventTracingParkEvents && TraceManager::is_initialized();
+ if (!_enabled) {
+ // Benign placeholders: do_write() is never called when disabled.
+ _thread_id = 0;
+ _seq = 1;
+ } else {
+ _thread_id = TraceManager::metadata()->thread_id(t);
+ _seq = TraceManager::metadata()->next_global_seq();
+ }
+ }
+
+ ~TraceEventThreadUnpark() {
+ if (_enabled) {
+ do_write();
+ }
+ }
+
+ // Sequence number assigned to this unpark event.
+ TraceTypes::seq_num seq() { return _seq; }
+
+ // Links this unpark to a related event's sequence number (default -1: none).
+ void set_chained_seq(TraceTypes::seq_num chained_seq) { _chained_seq = chained_seq; }
+};
+
+// Scoped helper for reporting a monitor-contended-exited event. The event is
+// only written if enable() was called; with EventTracingStrictMonitorEventOrder
+// the sequence number is acquired eagerly and must be consumed by a dummy
+// event when the real event ends up disabled.
+class TraceEventMonitorContendedExited: public StackObj {
+private:
+ No_Safepoint_Verifier _nsv;
+ bool _enabled;
+ ObjectMonitor *_monitor;
+ TraceTypes::seq_num _seq;
+ TraceTypes::stack_id *_stack_id_at; // where to read/store the stack id (may be NULL)
+ TraceTypes::stack_id _preallocated_stack_id;
+ bool _resolve_stack;
+
+public:
+ TraceEventMonitorContendedExited(ObjectMonitor *m)
+ // _preallocated_stack_id is now explicitly zeroed: previously it was the
+ // only member left uninitialized by the constructor.
+ : _nsv(true, false), _enabled(false), _monitor(m), _seq(0), _stack_id_at(NULL), _preallocated_stack_id(0), _resolve_stack(true)
+ {
+ if (EnableEventTracing && EventTracingStrictMonitorEventOrder) {
+ _seq = _monitor->next_trace_seq();
+ }
+ }
+
+ ~TraceEventMonitorContendedExited() {
+ if (_enabled) {
+ TraceTypes::stack_id id = 0;
+ if (_stack_id_at != NULL) {
+ // Preallocate a stack id on demand; the stack itself is resolved later.
+ if (*_stack_id_at == 0 && EnableEventTracingStackTraces) {
+ *_stack_id_at = TraceManager::metadata()->next_stack_id();
+ }
+ id = *_stack_id_at;
+ }
+ TraceEvents::write_monitor_contended_exited(_monitor, _seq, id, _resolve_stack);
+ } else if (EnableEventTracing && EventTracingStrictMonitorEventOrder && TraceManager::is_initialized()) {
+ assert(!_resolve_stack || _stack_id_at == NULL || *_stack_id_at == 0,
+ "event must be enabled if there is a stack id to resolve");
+
+ // must consume eagerly acquired sequence number
+ TraceEvents::write_monitor_dummy(_monitor, _seq);
+ }
+ }
+
+ // Use (or lazily preallocate into) the stack id stored at *p.
+ void set_use_or_preallocate_stack_id_at(TraceTypes::stack_id *p) {
+ assert(_stack_id_at == NULL, "set only once");
+ _stack_id_at = p;
+ }
+
+ // Use a specific, already-known stack id.
+ void set_use_stack_id(TraceTypes::stack_id id) {
+ _preallocated_stack_id = id;
+ set_use_or_preallocate_stack_id_at(&_preallocated_stack_id);
+ }
+
+ void set_resolve_stack(bool resolve) {
+ _resolve_stack = resolve;
+ }
+
+ // Arms the event for writing in the destructor.
+ void enable() {
+ if (EnableEventTracing && TraceManager::is_initialized()) {
+ if (!_enabled && !EventTracingStrictMonitorEventOrder) {
+ // lazily acquire sequence number, racing with other threads which try
+ // to spin-acquire the monitor and then write contended-entered events
+ _seq = _monitor->next_trace_seq();
+ }
+ _enabled = true;
+ }
+ }
+};
+
+// for convenience, so this is the only file to include for writing events
+#include "evtrace/traceWriter.hpp"
+#include "evtrace/traceMacros.hpp"
+
+#endif /* SHARE_VM_EVTRACE_TRACEEVENTS_HPP */