--- /dev/null 2016-10-25 08:46:44.038854975 +0200
+++ new/src/share/vm/evtrace/traceEvents.cpp 2016-10-25 10:40:13.832780607 +0200
@@ -0,0 +1,636 @@
+/*
+ * Copyright (c) 2014, 2015, Dynatrace and/or its affiliates. All rights reserved.
+ *
+ * This file is part of the Lock Contention Tracing Subsystem for the HotSpot
+ * Virtual Machine, which is developed at Christian Doppler Laboratory on
+ * Monitoring and Evolution of Very-Large-Scale Software Systems. Please
+ * contact us if you need additional information
+ * or have any questions.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "evtrace/traceEvents.hpp"
+
+#include "evtrace/traceWriter.hpp"
+#include "evtrace/traceManager.hpp"
+#include "evtrace/traceMetadata.hpp"
+
+#include "runtime/vframe.hpp"
+
+// One-time startup checks: every enum-like constant range that the tracer
+// serializes as a single byte (u1) must fit into 8 bits, otherwise events
+// would be silently corrupted on write.
+void TraceEvents::initialize() {
+  assert(_event_max <= UINT8_MAX, "event type does not fit");
+  assert(_park_return_code_max <= UINT8_MAX, "park return code does not fit");
+  assert(_safepoint_reason_max <= UINT8_MAX, "safepoint reason does not fit");
+  assert(_monitor_enter_wait_max <= UINT8_MAX, "monitor wait code does not fit");
+  assert(_monitor_entered_flags_max <= UINT8_MAX, "monitor entered flags do not fit");
+}
+
+// Events may only be emitted while the trace manager is initialized.
+inline bool TraceEvents::can_write() {
+  return TraceManager::is_initialized();
+}
+
+// Current timestamp according to the trace metadata clock.
+inline TraceTypes::timestamp TraceEvents::time_now() {
+  return TraceManager::metadata()->time_now();
+}
+
+// Trace id for a thread, assigned by the trace metadata.
+inline TraceTypes::thread_id TraceEvents::thread_id_for(Thread* t) {
+  return TraceManager::metadata()->thread_id(t);
+}
+
+// Trace id for an object; NOTE(review): callers treat the oop as possibly
+// invalid after this call (see write_thread_park_begin/write_group).
+inline TraceTypes::object_id TraceEvents::object_id_for(oop obj) {
+  return TraceManager::metadata()->object_id(obj);
+}
+
+// Trace id for an ObjectMonitor.
+inline TraceTypes::objmonitor_id TraceEvents::objmonitor_id_for(ObjectMonitor* om) {
+  return TraceManager::metadata()->objmonitor_id(om);
+}
+
+// Trace id of the Java object that an ObjectMonitor is associated with.
+inline TraceTypes::object_id TraceEvents::objmonitor_object_id_for(ObjectMonitor* om) {
+  return TraceManager::metadata()->objmonitor_object_id(om);
+}
+
+// Trace id for a class loader.
+inline TraceTypes::classloader_id TraceEvents::classloader_id_for(ClassLoaderData* cld) {
+  return TraceManager::metadata()->classloader_id(cld);
+}
+
+// Trace id of a method whose metadata must already have been written
+// (via retrieve_method_id_or_write_metadata()); asserts this is the case.
+inline TraceTypes::method_id TraceEvents::method_id_for(Method* m) {
+  bool added;
+  method_id mid = TraceManager::metadata()->method_id(m, added);
+  assert(!added, "must have been known");
+  return mid;
+}
+
+// Emits a thread-start event for the current thread, carrying its name
+// (limited to 64 bytes; name stays NULL for threads that are neither Java
+// threads nor named VM threads).
+void TraceEvents::write_thread_start() {
+  if (!can_write())
+    return;
+
+  Thread *t = Thread::current();
+  assert(t != NULL, "thread not attached");
+
+  ResourceMark rm;  // thread name is resource-allocated
+  const char *name = NULL;
+  if (t->is_Java_thread()) {
+    name = ((JavaThread *) t)->get_thread_name();
+  } else if (t->is_Named_thread()) {
+    name = ((NamedThread *) t)->name();
+  }
+
+  const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 64);
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + name_nbytes);
+  wr.put_event_type(event_thread_start);
+  wr.put_timestamp(time_now());
+  wr.put_utf8str(name, name_nbytes);
+}
+
+// Emits an event recording that thread t was renamed; only Java threads
+// carry a name here (name stays NULL otherwise), limited to 64 bytes.
+void TraceEvents::write_thread_name_change(Thread *t) {
+  assert(t != NULL, "null thread");
+
+  if (!can_write())
+    return;
+
+  ResourceMark rm;  // thread name is resource-allocated
+  const char *name = NULL;
+  if (t->is_Java_thread()) {
+    name = ((JavaThread *) t)->get_thread_name();
+  }
+
+  const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 64);
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(thread_id) + name_nbytes);
+  wr.put_event_type(event_thread_name_change);
+  wr.put_timestamp(time_now());
+  wr.put_thread_id(thread_id_for(t));
+  wr.put_utf8str(name, name_nbytes);
+}
+
+// Emits an event with thread t's current trace thread state.
+void TraceEvents::write_thread_state_change(Thread *t) {
+  assert(t != NULL, "null thread");
+
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(thread_id) + sizeof(thread_state));
+  wr.put_event_type(event_thread_state_change);
+  wr.put_timestamp(time_now());
+  wr.put_thread_id(thread_id_for(t));
+  wr.put_thread_state(TraceManager::metadata()->thread_state(t));
+}
+
+// Emits an event recording that thread t was interrupted.  Carries a global
+// sequence number so it can be ordered relative to other sequenced events.
+void TraceEvents::write_thread_interrupt(Thread *t) {
+  assert(t != NULL, "null thread");
+
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(thread_id));
+  wr.put_event_type(event_thread_interrupt);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(TraceManager::metadata()->next_global_seq());
+  wr.put_thread_id(thread_id_for(t));
+}
+
+// Emits a thread-exit event for the current thread (timestamp only; the
+// thread is implied by the trace buffer/stream the event is written to --
+// NOTE(review): confirm against the trace format reader).
+void TraceEvents::write_thread_exit() {
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp));
+  wr.put_event_type(event_thread_exit);
+  wr.put_timestamp(time_now());
+}
+
+// Emits an event just before the current thread parks: records the park
+// blocker object and its class (if any), the current stack trace, the park
+// nesting level (clamped to one byte), whether the deadline is absolute,
+// and the requested park time.
+void TraceEvents::write_thread_park_begin(JavaThread *t, bool is_absolute, timestamp park_time) {
+  assert(t != NULL, "null thread");
+
+  if (!can_write())
+    return;
+
+  Klass* klass = NULL;
+  object_id oid = 0;
+  {
+    oop blocker = t->current_park_blocker();
+    if (blocker != NULL) {
+      klass = blocker->klass();
+      oid = object_id_for(blocker);
+      assert(oid != 0, "just checking");
+    }
+  } // blocker oop may not be valid anymore after object_id_for()
+
+  // recheck necessary, because object_id_for can cause a safepoint check
+  if (!can_write())
+    return;
+
+  class_id cid = 0;
+  if (klass != NULL) {
+    // may emit a class metadata event as a side effect
+    cid = retrieve_class_id_or_write_metadata(klass);
+    assert(cid != 0, "just checking");
+  }
+
+  assert(t->has_last_Java_frame(), "must have");
+  stack_id stid = retrieve_stack_id_or_write_metadata(t);
+
+  int nesting_level = t->nesting_level();
+  if (nesting_level > UINT8_MAX) {
+    nesting_level = UINT8_MAX;  // clamp: serialized as a single byte below
+  }
+
+  // NOTE(review): presumably guards against a safepoint (and a possible
+  // metadata reset) between taking the sequence number and writing the
+  // event -- confirm against TraceManager's reset protocol.
+  No_Safepoint_Verifier nsv(true, false);
+  seq_num seq = TraceManager::metadata()->next_global_seq();
+  t->set_park_last_global_seq(seq);  // park-end event references this seq
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(object_id) + sizeof(class_id) + sizeof(stack_id) + sizeof(u1) + sizeof(u1) + sizeof(timestamp));
+  wr.put_event_type(event_thread_park_begin);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(seq);
+  wr.put_object_id(oid);
+  wr.put_class_id(cid);
+  wr.put_stack_id(stid);
+  wr.put_u1(nesting_level);
+  wr.put_u1(is_absolute);
+  wr.put_timestamp(park_time);
+}
+
+// Emits an event after a park ends: references the park-begin event (seq),
+// the unpark event that woke the thread (unpark_seq), and the reason the
+// park returned (return_code, serialized as one byte).
+void TraceEvents::write_thread_park_end(Thread *t, seq_num seq, seq_num unpark_seq, park_return_code return_code) {
+  assert(t != NULL, "null thread");
+  assert(return_code > _park_return_code_min && return_code < _park_return_code_max, "invalid return_code");
+
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(seq_num) + sizeof(u1));
+  wr.put_event_type(event_thread_park_end);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(seq);
+  wr.put_seq_num(unpark_seq);
+  wr.put_u1(return_code);
+}
+
+// Emits an event recording that the current thread unparked the given
+// target thread.  Captures the unparker's stack trace when the current
+// thread is a Java thread (stack id 0 otherwise).
+void TraceEvents::write_thread_unpark(thread_id thread, seq_num seq, seq_num chained_seq) {
+  if (!can_write())
+    return;
+
+  stack_id stid = 0;
+  Thread *t = Thread::current();
+  if (t->is_Java_thread()) {
+    JavaThread *jt = (JavaThread*) t;
+    assert(jt->has_last_Java_frame(), "must have");
+    stid = retrieve_stack_id_or_write_metadata(jt);
+  }
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(seq_num) + sizeof(thread_id) + sizeof(stack_id));
+  wr.put_event_type(event_thread_unpark);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(seq);
+  wr.put_seq_num(chained_seq);
+  wr.put_thread_id(thread);
+  wr.put_stack_id(stid);
+}
+
+// Emits an event when a stack lock is inflated into an ObjectMonitor,
+// recording the monitor, its object, and the object's class.
+void TraceEvents::write_monitor_inflate(ObjectMonitor* m, seq_num seq) {
+  if (!can_write())
+    return;
+
+  // resolve first: may emit a class metadata event of its own
+  class_id cid = retrieve_class_id_or_write_metadata(((oop)m->object())->klass());
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(object_id) + sizeof(class_id));
+  wr.put_event_type(event_monitor_inflate);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(seq);
+  wr.put_objmonitor_id(objmonitor_id_for(m));
+  wr.put_object_id(objmonitor_object_id_for(m));
+  wr.put_class_id(cid);
+}
+
+// Emits an event when an idle ObjectMonitor is deflated, consuming the
+// monitor's next per-monitor trace sequence number.
+void TraceEvents::write_monitor_deflate(ObjectMonitor* m) {
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id));
+  wr.put_event_type(event_monitor_deflate);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(m->next_trace_seq());
+  wr.put_objmonitor_id(objmonitor_id_for(m));
+}
+
+// Returns the trace id for klass.  The first time a class is seen, a class
+// metadata event (id, class loader id, name limited to 256 bytes) is
+// written so readers can resolve the id.
+TraceTypes::class_id TraceEvents::retrieve_class_id_or_write_metadata(Klass *klass) {
+  assert(can_write(), "caller's responsibility");
+
+  bool is_new;
+  class_id id = TraceManager::metadata()->class_id(klass, is_new);
+  if (is_new) {
+    ResourceMark rm;  // as_utf8() is resource-allocated
+    const char *name = klass->name()->as_utf8();
+
+    const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 256);
+    TraceWriter wr(sizeof(event_type) + sizeof(class_id) + sizeof(classloader_id) + name_nbytes);
+    wr.put_event_type(event_class_metadata);
+    wr.put_class_id(id);
+    wr.put_classloader_id(classloader_id_for(klass->class_loader_data()));
+    wr.put_utf8str(name, name_nbytes);
+  }
+  return id;
+}
+
+// Returns the trace id for method.  The first time a method is seen, its
+// holder class metadata is written first, then a method metadata event
+// (id, holder class id, name up to 256 bytes, signature up to 1024 bytes).
+TraceTypes::method_id TraceEvents::retrieve_method_id_or_write_metadata(Method *method) {
+  assert(can_write(), "caller's responsibility");
+
+  bool is_new;
+  method_id id = TraceManager::metadata()->method_id(method, is_new);
+  if (is_new) {
+    class_id holder_id = retrieve_class_id_or_write_metadata(method->method_holder());
+
+    ResourceMark rm;  // as_utf8() is resource-allocated
+    const char *name = method->name()->as_utf8(),
+               *sig = method->signature()->as_utf8();
+
+    const size_t name_nbytes = TraceWriter::nbytes_for_utf8str(name, 256),
+                 sig_nbytes = TraceWriter::nbytes_for_utf8str(sig, 1024);
+    TraceWriter wr(sizeof(event_type) + sizeof(method_id) + sizeof(class_id) + name_nbytes + sig_nbytes);
+    wr.put_event_type(event_method_metadata);
+    wr.put_method_id(id);
+    wr.put_class_id(holder_id);
+    wr.put_utf8str(name, name_nbytes);
+    wr.put_utf8str(sig, sig_nbytes);
+  }
+  return id;
+}
+
+// Captures the calling thread's current stack trace and returns its trace
+// id, writing a stack metadata event the first time a given stack is seen.
+//
+// Optimization ("memento"): on a full walk, the frame at depth
+// EventTracingStackMementoFrame is memento-marked; while that frame has not
+// returned, later walks can stop at the marked frame and reuse the cached
+// bottom portion of the previously recorded stack trace.
+//
+// If preallocated_id is non-zero, the caller reserved that id up front; it
+// is used for a new stack, or resolved to the id of an identical known
+// stack via an identical-stacks metadata event.
+TraceTypes::stack_id TraceEvents::retrieve_stack_id_or_write_metadata(JavaThread *t, stack_id preallocated_id) {
+  // RAII helper: publishes stack-walk statistics when it goes out of scope,
+  // on every return path.
+  class StatsUpdate : StackObj {
+  public:
+    bool truncated;
+    intptr_t frames, reused_memento_frames;
+    StatsUpdate() : truncated(false), frames(0), reused_memento_frames(0) {}
+    ~StatsUpdate() {
+      TraceManager::update_stack_trace_stats(truncated, frames, reused_memento_frames);
+    }
+  };
+
+  assert(can_write(), "caller's responsibility");
+
+  if (!EnableEventTracingStackTraces) {
+    assert(preallocated_id == 0, "no preallocation allowed");
+    return (TraceTypes::stack_id) 0;  // 0 = no stack recorded
+  }
+
+  StatsUpdate stats;
+
+  bool have_memento = (t->memento_original_return_address() != NULL);
+  const CachedTraceStack *memento_stack = NULL;
+  if (have_memento) {
+    // we have patched a stack frame earlier and it has not returned yet
+    memento_stack = t->memento_stack_trace();
+    // NOTE: after metadata has been purged, memento stack trace is NULL (safe to ignore)
+  }
+
+  TraceStackBuilder sb;
+  bool new_memento = false;
+
+  // walk compiled (nmethod) and interpreted frames from the top of stack
+  RegisterMap rm(t, false);
+  frame fr = t->last_frame();
+  int index = -1;
+  while (!fr.is_first_frame() && !sb.is_full()) {
+    if ((fr.cb() != NULL && fr.cb()->is_nmethod()) || fr.is_interpreted_frame()) {
+      index++;
+      sb.add_frame(&fr);
+
+      if (memento_stack != NULL) {
+        // if the memento stack trace is truncated, we can't use it if we have fewer
+        // frames above the patched frame or we'll be missing frames at the bottom
+        if (!memento_stack->is_truncated() || index >= EventTracingStackMementoFrame) {
+          if (fr.is_memento_marked(t)) {
+            break;  // cached bottom portion takes over from here
+          }
+        }
+      } else if (!have_memento && index == EventTracingStackMementoFrame) {
+        // mark this frame so the next walk can stop here
+        fr.memento_mark(t);
+        new_memento = true;
+      }
+    }
+    fr = fr.sender(&rm);
+  }
+
+  if (!fr.is_first_frame() && sb.is_full()) {
+    sb.set_truncated();  // ran out of builder capacity before the last frame
+  }
+
+  CompositeTraceStack cps(sb);
+
+  if (memento_stack != NULL && !fr.is_first_frame()) {
+    // reached memento frame
+
+#ifdef ASSERT
+    // another stack walk for validation
+    TraceStackBuilder sb0;
+    RegisterMap rm0(t, false);
+    frame fr0 = t->last_frame();
+    while (!fr0.is_first_frame() && !sb0.is_full()) {
+      if (fr0.is_interpreted_frame() || fr0.is_compiled_frame() || fr0.is_native_frame()) {
+        sb0.add_frame(&fr0);
+      }
+      fr0 = fr0.sender(&rm0);
+    }
+    if (!fr0.is_first_frame()) {
+      assert(sb0.is_full(), "incomplete stack walk");
+      sb0.set_truncated();
+    }
+    CompositeTraceStack cps0(sb0);
+#endif
+
+    if (index == EventTracingStackMementoFrame && sb.range_equals(0, memento_stack, 0, index + 1)) {
+      // top frames are equal, exact match
+      assert(cps0.equals(memento_stack), "sanity");
+      stats.frames = memento_stack->count();
+      stats.reused_memento_frames = stats.frames - sb.count(); // still had to walk the top frames
+      stats.truncated = memento_stack->is_truncated();
+      return memento_stack->id();
+    }
+
+    // complete with frames from cache
+    cps.set_bottom(memento_stack, EventTracingStackMementoFrame + 1);
+    stats.reused_memento_frames = cps.count() - sb.count();
+
+    assert(cps.equals(cps0), "sanity");
+  }
+
+  stats.frames = cps.count();
+  stats.truncated = cps.is_truncated();
+
+  bool is_known;
+  const CachedTraceStack *cached = TraceManager::metadata()->get_or_try_add_stack(cps, is_known, preallocated_id);
+  TraceTypes::stack_id id;
+  if (cached != NULL) {
+    id = cached->id();
+    assert(is_known || preallocated_id == 0 || cached->id() == preallocated_id, "sanity");
+  } else {
+    // insert failed (full)
+    id = preallocated_id;
+    if (id == 0) {
+      // allocate a one-time id
+      // FIXME: wasteful, since get_or_try_add_stack() likely also allocated an id and threw it away
+      id = TraceManager::metadata()->next_stack_id();
+    }
+  }
+  if (!is_known) {
+    // first time this stack is seen: describe it to readers
+    write_stack_metadata(id, cps);
+  } else if (preallocated_id != 0) {
+    // caller reserved an id, but an identical stack already exists: alias it
+    write_identical_stacks_metadata(preallocated_id, cached->id());
+  }
+  if (new_memento) {
+    // cached may be NULL here (cache full); a NULL memento stack trace is
+    // tolerated by later walks (see comment near the top)
+    t->set_memento_stack_trace(cached);
+  }
+  return id;
+}
+
+// Writes the metadata event describing a stack trace: id, a one-byte flag
+// (1 = complete, 0 = truncated), the frame count (u2), and one
+// (method id, bci) pair per frame.  Class and method metadata is emitted
+// first so that all method ids are resolvable.
+void TraceEvents::write_stack_metadata(stack_id id, const CompositeTraceStack &ts) {
+  assert(can_write(), "caller's responsibility");
+
+  // TODO: with heavy inlining, stack traces can become so large
+  // that they no longer fit in a single trace buffer
+
+  size_t count = 0;
+
+  // make methods known
+  TraceStackVframeIterator it(ts);
+  while (it.has_next()) {
+    it.next();
+    retrieve_class_id_or_write_metadata(it.method()->method_holder());
+    retrieve_method_id_or_write_metadata(it.method());
+    count++;
+  }
+  it.reset();
+
+  TraceWriter wr(sizeof(event_type) + sizeof(stack_id) + sizeof(u1) + sizeof(u2) + count * (sizeof(method_id) + sizeof(method_bci)));
+  wr.put_event_type(event_stack_metadata);
+  wr.put_stack_id(id);
+  wr.put_u1(ts.is_truncated() ? 0 : 1);  // "complete" flag
+  assert(count == (u2) count, "must fit");
+  wr.put_u2(count);
+  // second pass: frame data, using the now-known method ids
+  while (it.has_next()) {
+    it.next();
+    wr.put_method_id(method_id_for(it.method()));
+    wr.put_method_bci(it.bci());
+  }
+}
+
+// Writes a metadata event declaring that stack id refers to the same stack
+// trace as the already-known id (used to resolve preallocated ids).
+void TraceEvents::write_identical_stacks_metadata(stack_id id, stack_id known) {
+  assert(can_write(), "caller's responsibility");
+
+  TraceWriter wr(sizeof(event_type) + sizeof(stack_id) + sizeof(stack_id));
+  wr.put_event_type(event_identical_stacks_metadata);
+  wr.put_stack_id(id);
+  wr.put_stack_id(known);
+}
+
+// Emits an event recording that a class loader (and its classes) is
+// being unloaded.
+void TraceEvents::write_class_loader_unload(ClassLoaderData* cld) {
+  if(!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(classloader_id));
+  wr.put_event_type(event_class_loader_unload);
+  wr.put_classloader_id(classloader_id_for(cld));
+}
+
+// Emits an event when the current thread starts contending for an
+// ObjectMonitor, with the contender's stack (0 if no last Java frame) and
+// a one-byte wait code.
+void TraceEvents::write_monitor_contended_enter(ObjectMonitor *m, monitor_enter_wait wait) {
+  if (!can_write())
+    return;
+
+  stack_id stid = 0;
+  if (JavaThread::current()->has_last_Java_frame()) {
+    stid = retrieve_stack_id_or_write_metadata(JavaThread::current());
+  }
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(stack_id) + sizeof(u1));
+  wr.put_event_type(event_monitor_contended_enter);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(m->next_trace_seq());
+  wr.put_objmonitor_id(objmonitor_id_for(m));
+  wr.put_stack_id(stid);
+  wr.put_u1(wait);
+}
+
+// Emits an event when the current thread finally acquires a contended
+// ObjectMonitor; flags are serialized as a single byte.
+void TraceEvents::write_monitor_contended_entered(ObjectMonitor *m, monitor_entered_flags flags) {
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(u1));
+  wr.put_event_type(event_monitor_contended_entered);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(m->next_trace_seq());
+  wr.put_objmonitor_id(objmonitor_id_for(m));
+  wr.put_u1(flags);
+}
+
+// Emits an event when a thread exits a contended ObjectMonitor.  If
+// resolve_stack is set, the current stack is captured (using any
+// preallocated id); otherwise preallocated_stack_id is written as-is.
+void TraceEvents::write_monitor_contended_exited(ObjectMonitor *m, seq_num seq, stack_id preallocated_stack_id, bool resolve_stack) {
+  if (!can_write())
+    return;
+
+  TraceTypes::stack_id stid = preallocated_stack_id;
+  if (resolve_stack) {
+    if (JavaThread::current()->has_last_Java_frame()) {
+      stid = retrieve_stack_id_or_write_metadata(JavaThread::current(), preallocated_stack_id);
+    } else if (preallocated_stack_id != 0) {
+      // we preallocated a stack id, so we must resolve it to the unknown stack
+      write_identical_stacks_metadata(preallocated_stack_id, 0);
+    }
+  }
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(objmonitor_id) + sizeof(stack_id));
+  wr.put_event_type(event_monitor_contended_exited);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(seq);
+  wr.put_objmonitor_id(objmonitor_id_for(m));
+  wr.put_stack_id(stid);
+}
+
+// Emits a dummy monitor event carrying only a sequence number and the
+// monitor id (no timestamp) -- NOTE(review): presumably used to retire an
+// allocated seq num so the monitor's sequence stays gapless; confirm with
+// callers.
+void TraceEvents::write_monitor_dummy(ObjectMonitor *m, seq_num seq) {
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(seq_num) + sizeof(objmonitor_id));
+  wr.put_event_type(event_monitor_dummy);
+  wr.put_seq_num(seq);
+  wr.put_objmonitor_id(objmonitor_id_for(m));
+}
+
+// Emits an event when a safepoint begins; the reason is serialized as a
+// single byte (range-checked by the assert and by initialize()).
+void TraceEvents::write_safepoint_begin(safepoint_reason reason) {
+  assert(reason > _safepoint_reason_min && reason < _safepoint_reason_max, "invalid reason");
+
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(u1));
+  wr.put_event_type(event_safepoint_begin);
+  wr.put_timestamp(time_now());
+  wr.put_u1((u1) reason);
+}
+
+// Emits an event when a safepoint ends, with the number of VM operations
+// processed during it (saturated at 255 to fit in one byte).
+void TraceEvents::write_safepoint_end(u4 vmops_processed) {
+  if (!can_write())
+    return;
+
+  if (vmops_processed > UINT8_MAX)
+    vmops_processed = UINT8_MAX;  // clamp: serialized as u1 below
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(u1));
+  wr.put_event_type(event_safepoint_end);
+  wr.put_timestamp(time_now());
+  wr.put_u1((u1) vmops_processed);
+}
+
+// Emits the final event when the VM shuts down.
+void TraceEvents::write_vm_end() {
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp));
+  wr.put_event_type(event_vm_end);
+  wr.put_timestamp(time_now());
+}
+
+// Emits a marker event telling readers that all previously assigned
+// metadata ids (classes, methods, stacks, ...) are no longer valid.
+void TraceEvents::write_metadata_reset() {
+  if (!can_write())
+    return;
+
+  TraceWriter wr(sizeof(event_type));
+  wr.put_event_type(event_metadata_reset);
+}
+
+// Emits a group event associating the thread's activity with a source
+// object (and its class), referencing the seq num of a preceding
+// park-begin event.
+void TraceEvents::write_group(JavaThread* t, seq_num park_global_seq_begin_ref, oop source) {
+  assert(t != NULL, "null thread");
+  assert(source != NULL, "null source");
+
+  if (!can_write())
+    return;
+
+  Klass* klass = source->klass();
+  object_id oid = object_id_for(source);
+  source = NULL; // source oop may not be valid anymore after object_id_for()
+  assert(oid != 0, "just checking");
+
+  // recheck necessary, because object_id_for() can cause a safepoint check
+  if (!can_write())
+    return;
+
+  // may emit a class metadata event as a side effect
+  class_id cid = retrieve_class_id_or_write_metadata(klass);
+  assert(cid != 0, "just checking");
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + sizeof(seq_num) + sizeof(seq_num) + sizeof(object_id) + sizeof(class_id));
+  wr.put_event_type(event_group);
+  wr.put_timestamp(time_now());
+  wr.put_seq_num(TraceManager::metadata()->next_global_seq());
+  wr.put_seq_num(park_global_seq_begin_ref);
+  wr.put_object_id(oid);
+  wr.put_class_id(cid);
+}
+
+// Emits a free-form marker event with a label (limited to 64 bytes).
+void TraceEvents::write_marker(const char *label) {
+  if (!can_write())
+    return;
+
+  const size_t label_nbytes = TraceWriter::nbytes_for_utf8str(label, 64);
+
+  TraceWriter wr(sizeof(event_type) + sizeof(timestamp) + label_nbytes);
+  wr.put_event_type(event_marker);
+  wr.put_timestamp(time_now());
+  wr.put_utf8str(label, label_nbytes);
+}
+
+// Deferred-event hook: forwards the stored park-end data to TraceEvents.
+void TraceEventThreadParkEnd::do_write() {
+  TraceEvents::write_thread_park_end(_thread, _seq, _unpark_seq, _return_code);
+}
+
+// Deferred-event hook: forwards the stored unpark data to TraceEvents.
+// The chained seq must have been filled in (-1 is the unset sentinel).
+void TraceEventThreadUnpark::do_write() {
+  assert(_chained_seq != -1, "chained_seq not set");
+  TraceEvents::write_thread_unpark(_thread_id, _seq, _chained_seq);
+}