--- /dev/null 2016-10-25 08:46:44.038854975 +0200
+++ new/src/share/vm/evtrace/traceManager.inline.hpp 2016-10-25 10:40:14.841781274 +0200
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014, 2015, Dynatrace and/or its affiliates. All rights reserved.
+ *
+ * This file is part of the Lock Contention Tracing Subsystem for the HotSpot
+ * Virtual Machine, which is developed at Christian Doppler Laboratory on
+ * Monitoring and Evolution of Very-Large-Scale Software Systems. Please
+ * contact us if you need additional information
+ * or have any questions.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef SHARE_VM_EVTRACE_TRACEMANAGER_INLINE_HPP
+#define SHARE_VM_EVTRACE_TRACEMANAGER_INLINE_HPP
+
+#include "evtrace/traceBuffer.hpp"
+#include "evtrace/traceBufferQueue.hpp"
+#include "evtrace/traceMetadata.hpp"
+
+inline bool TraceManager::is_initialized() {
+  return _is_initialized; // simple flag read; no synchronization here -- callers tolerate a stale value, presumably -- TODO confirm
+}
+
+inline void TraceManager::assert_initialized() { // debug-only sanity check: manager and its queues must be set up
+  assert(_is_initialized, "not initialized");
+  assert(_free_queue != NULL || !EnableEventTracingBufferReuse, "freeQueue not initialized"); // free queue exists only when buffer reuse is enabled
+  assert(_flush_queue != NULL, "flushQueue not initialized"); // flush queue is always required
+}
+
+inline TraceBuffer *TraceManager::allocate_buffer() { // heap-allocates a fresh trace buffer, updating diagnostic counters
+  // tradeoff: small buffer sizes reduce the time it takes to process a
+  // buffer and make it available again, but cause more queue operations
+  // (and possible contentions). large buffers are wasteful for short-lived
+  // threads, but cause less queue contention and reclaim operations
+  // waiting for buffers to be processed can take significantly longer.
+  size_t capacity = EventTracingBufferCapacity;
+  if (EnableEventTracingRandomizedBufferCapacity) {
+    // use random buffer sizes to avoid that threads which do similar work
+    // submit and request buffers all at once
+    capacity = round_to(capacity / 2 + ((size_t) os::random()) % capacity, 128); // roughly [capacity/2, 3*capacity/2), rounded to a 128-byte multiple
+  }
+  TraceBuffer *buffer = new (capacity) TraceBuffer(capacity); // sized placement operator new: storage for header plus capacity bytes -- TODO confirm
+  if (EnableEventTracingDiagnostics) {
+    Atomic::inc_ptr(&_allocated_buffer_count); // total allocations ever made
+    jlong count = Atomic::add_ptr(1, &_buffer_count); // returns the incremented live-buffer count
+    jlong max;
+    do {
+      max = _max_buffer_count;
+    } while (count > max && Atomic::cmpxchg(count, &_max_buffer_count, max) != max); // lock-free high-water mark: retry while our count is larger and another thread raced us
+  }
+  return buffer;
+}
+
+inline void TraceManager::free_buffer(TraceBuffer *buffer) { // destroys a buffer outright (not returned to the free queue)
+  assert(buffer != NULL, "sanity");
+  delete buffer;
+
+  if (EnableEventTracingDiagnostics) {
+    Atomic::dec_ptr(&_buffer_count); // one fewer live buffer
+  }
+}
+
+inline TraceBuffer *TraceManager::request_buffer() { // hands a buffer to a thread: recycled if reuse is on, freshly allocated otherwise
+  assert_initialized();
+
+  TraceBuffer *buffer = NULL;
+  if (EnableEventTracingBufferReuse) {
+    buffer = _free_queue->try_dequeue(); // may yield NULL when no recycled buffer is available
+  }
+  if (buffer == NULL) {
+    buffer = allocate_buffer(); // fall back to a fresh allocation
+  }
+  return buffer;
+}
+
+inline void TraceManager::pre_submit_buffer(TraceBuffer *buffer) { // detach the buffer from its owning thread before it is queued
+  assert(buffer != NULL, "no buffer given");
+  assert(buffer->owner_id == 0, "must not be set at this point"); // submit must happen exactly once per buffer
+  buffer->owner_id = _metadata->thread_id(buffer->owner); // capture a stable id; the owner thread may exit before the buffer is processed -- TODO confirm
+  buffer->owner = NULL; // drop the raw Thread* so no dangling pointer travels with the buffer
+}
+
+inline void TraceManager::submit_buffer(TraceBuffer *buffer) { // hands a filled buffer over to the flush queue's consumer
+  assert_initialized();
+  assert(buffer != NULL, "buffer is NULL");
+
+  TraceManager::pre_submit_buffer(buffer);
+  size_t bytes = buffer->filled_size(); // read before enqueue: the buffer must not be touched once the consumer can claim it
+  _flush_queue->enqueue(buffer);
+
+  if (EnableEventTracingDiagnostics) {
+    Atomic::add(bytes, &_submitted_trace_bytes); // running total of payload bytes submitted
+  }
+}
+
+inline TraceMetadata *TraceManager::metadata() { // accessor for the shared metadata; pointer remains owned by TraceManager -- TODO confirm
+  assert_initialized();
+  return _metadata;
+}
+
+inline void TraceManager::update_stack_trace_stats(bool truncated, intptr_t total_frames, intptr_t memento_frames) { // diagnostics-only stack-trace counters
+  assert_initialized();
+  if (EnableEventTracingDiagnostics) { // all counters below exist purely for diagnostics
+    assert(total_frames > 0 && memento_frames >= 0, "sanity");
+    Atomic::inc_ptr(&_total_stack_traces); // one more trace recorded
+    if (truncated) {
+      Atomic::inc_ptr(&_truncated_stack_traces); // trace hit the depth limit, presumably -- verify against caller
+    }
+    Atomic::add_ptr(total_frames, &_total_stack_frames);
+    if (memento_frames != 0) { // nonzero: a suffix of a previously captured trace was reused
+      assert(memento_frames < total_frames, "sanity"); // reused part must be a strict subset of the full trace
+      Atomic::inc_ptr(&_reused_memento_stack_traces);
+      Atomic::add_ptr(memento_frames, &_reused_memento_stack_frames);
+    }
+  }
+}
+
+#endif /* SHARE_VM_EVTRACE_TRACEMANAGER_INLINE_HPP */