1 /*
   2  * Copyright (c) 2014, 2015, Dynatrace and/or its affiliates. All rights reserved.
   3  *
   4  * This file is part of the Lock Contention Tracing Subsystem for the HotSpot
   5  * Virtual Machine, which is developed at Christian Doppler Laboratory on
   6  * Monitoring and Evolution of Very-Large-Scale Software Systems. Please
   7  * contact us at <http://mevss.jku.at/> if you need additional information
   8  * or have any questions.
   9  *
  10  * This code is free software; you can redistribute it and/or modify it
  11  * under the terms of the GNU General Public License version 2 only, as
  12  * published by the Free Software Foundation.
  13  *
  14  * This code is distributed in the hope that it will be useful, but WITHOUT
  15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  17  * version 2 for more details (a copy is included in the LICENSE file that
  18  * accompanied this code).
  19  *
  20  * You should have received a copy of the GNU General Public License version
  21  * 2 along with this work. If not, see <http://www.gnu.org/licenses/>.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_EVTRACE_TRACEMANAGER_INLINE_HPP
  26 #define SHARE_VM_EVTRACE_TRACEMANAGER_INLINE_HPP
  27 
  28 #include "evtrace/traceBuffer.hpp"
  29 #include "evtrace/traceBufferQueue.hpp"
  30 #include "evtrace/traceMetadata.hpp"
  31 
// Returns true once the trace manager has been set up and tracing may be used.
inline bool TraceManager::is_initialized() {
  return _is_initialized;
}
  35 
// Debug-only sanity check: the manager and its queues must exist before use.
// The free queue is only created when buffer reuse is enabled, so its absence
// is tolerated in that configuration.
inline void TraceManager::assert_initialized() {
  assert(_is_initialized, "not initialized");
  assert(_free_queue != NULL || !EnableEventTracingBufferReuse, "freeQueue not initialized");
  assert(_flush_queue != NULL, "flushQueue not initialized");
}
  41 
// Allocates a fresh trace buffer and (optionally) updates diagnostic counters.
//
// Capacity trade-off: small buffers reduce the time it takes to process a
// buffer and make it available again, but cause more queue operations (and
// possible contention); large buffers are wasteful for short-lived threads
// and make reclaim operations that wait for buffers to be processed take
// significantly longer, but cause less queue contention.
inline TraceBuffer *TraceManager::allocate_buffer() {
  size_t capacity = EventTracingBufferCapacity;
  if (EnableEventTracingRandomizedBufferCapacity) {
    // use random buffer sizes to avoid that threads which do similar work
    // submit and request buffers all at once; the result is roughly in
    // [capacity/2, 3*capacity/2), rounded to a multiple of 128 bytes
    capacity = round_to(capacity / 2 + ((size_t) os::random()) % capacity, 128);
  }
  TraceBuffer *buffer = new (capacity) TraceBuffer(capacity);
  if (EnableEventTracingDiagnostics) {
    // track total allocations, the current live-buffer count, and a
    // lock-free running maximum of the live-buffer count
    Atomic::inc_ptr(&_allocated_buffer_count);
    jlong count = Atomic::add_ptr(1, &_buffer_count);
    jlong max;
    do {
      // CAS loop: publish our count as the new maximum unless another
      // thread has already recorded an equal or higher value
      max = _max_buffer_count;
    } while (count > max && Atomic::cmpxchg(count, &_max_buffer_count, max) != max);
  }
  return buffer;
}
  65 
  66 inline void TraceManager::free_buffer(TraceBuffer *buffer) {
  67   assert(buffer != NULL, "sanity");
  68   delete buffer;
  69 
  70   if (EnableEventTracingDiagnostics) {
  71     Atomic::dec_ptr(&_buffer_count);
  72   }
  73 }
  74 
  75 inline TraceBuffer *TraceManager::request_buffer() {
  76   assert_initialized();
  77 
  78   TraceBuffer *buffer = NULL;
  79   if (EnableEventTracingBufferReuse) {
  80     buffer = _free_queue->try_dequeue();
  81   }
  82   if (buffer == NULL) {
  83     buffer = allocate_buffer();
  84   }
  85   return buffer;
  86 }
  87 
// Detaches a buffer from its owner thread before it is handed to the flush
// queue: the owner is recorded as a stable thread id and the direct thread
// reference is cleared, so the buffer no longer points at the thread object.
// NOTE(review): order matters — owner_id must be derived from owner before
// owner is reset to NULL.
inline void TraceManager::pre_submit_buffer(TraceBuffer *buffer) {
  assert(buffer != NULL, "no buffer given");
  assert(buffer->owner_id == 0, "must not be set at this point");
  buffer->owner_id = _metadata->thread_id(buffer->owner);
  buffer->owner = NULL;
}
  94 
// Hands a filled buffer over to the flush queue for processing and updates
// the submitted-bytes diagnostic counter.
inline void TraceManager::submit_buffer(TraceBuffer *buffer) {
  assert_initialized();
  assert(buffer != NULL, "buffer is NULL");

  TraceManager::pre_submit_buffer(buffer);
  // capture the size BEFORE enqueueing: once the buffer is on the flush
  // queue it may be consumed (and recycled or freed) concurrently, so it
  // must not be touched after enqueue()
  size_t bytes = buffer->filled_size();
  _flush_queue->enqueue(buffer);

  if (EnableEventTracingDiagnostics) {
    Atomic::add(bytes, &_submitted_trace_bytes);
  }
}
 107 
// Returns the trace metadata singleton; the manager must be initialized.
inline TraceMetadata *TraceManager::metadata() {
  assert_initialized();
  return _metadata;
}
 112 
// Accumulates stack-trace diagnostic counters (no-op unless diagnostics are
// enabled): total traces taken, how many were truncated, total frames walked,
// and how many traces/frames were reused from a memento (a previously
// captured partial trace).
inline void TraceManager::update_stack_trace_stats(bool truncated, intptr_t total_frames, intptr_t memento_frames) {
  assert_initialized();
  if (EnableEventTracingDiagnostics) {
    assert(total_frames > 0 && memento_frames >= 0, "sanity");
    Atomic::inc_ptr(&_total_stack_traces);
    if (truncated) {
      Atomic::inc_ptr(&_truncated_stack_traces);
    }
    Atomic::add_ptr(total_frames, &_total_stack_frames);
    if (memento_frames != 0) {
      // a memento can only ever cover a strict prefix of the full trace
      assert(memento_frames < total_frames, "sanity");
      Atomic::inc_ptr(&_reused_memento_stack_traces);
      Atomic::add_ptr(memento_frames, &_reused_memento_stack_frames);
    }
  }
}
 129 
 130 #endif /* SHARE_VM_EVTRACE_TRACEMANAGER_INLINE_HPP */