/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "jfr/leakprofiler/utilities/granularTimer.hpp"
#include "jfr/leakprofiler/chains/rootSetClosure.hpp"
#include "jfr/leakprofiler/chains/edge.hpp"
#include "jfr/leakprofiler/chains/edgeQueue.hpp"
#include "jfr/leakprofiler/chains/edgeStore.hpp"
#include "jfr/leakprofiler/chains/bitset.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/emitEventOperation.hpp"
#include "jfr/leakprofiler/chains/bfsClosure.hpp"
#include "jfr/leakprofiler/chains/dfsClosure.hpp"
#include "jfr/leakprofiler/chains/objectSampleMarker.hpp"
#include "jfr/recorder/access/jfrbackend.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "trace/tracing.hpp"
#include "utilities/globalDefinitions.hpp"

/* The EdgeQueue is backed by directly managed virtual memory.
 * We will attempt to dimension an initial reservation
 * in proportion to the size of the heap (represented by heap_region).
 * Initial memory reservation: 5% of the heap, but at least 32 MB.
 * Commit ratio: 1 : 10 (subject to allocation granularities)
 */
static size_t edge_queue_memory_reservation(const MemRegion& heap_region) {
  const size_t memory_reservation_bytes = MAX2(heap_region.byte_size() / 20, 32*M);
  assert(memory_reservation_bytes >= (size_t)32*M, "invariant");
  return memory_reservation_bytes;
}
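
// Worked example (straight from the arithmetic above): a 4 GB heap yields
// a reservation of 4096 MB / 20 = ~205 MB, whereas a 256 MB heap computes
// only ~13 MB and is clamped to the 32 MB minimum.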

static size_t edge_queue_memory_commit_size(size_t memory_reservation_bytes) {
  const size_t memory_commit_block_size_bytes = memory_reservation_bytes / 10;
  assert(memory_commit_block_size_bytes >= (size_t)3*M, "invariant");
  return memory_commit_block_size_bytes;
}
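
// Worked example: the minimum 32 MB reservation gives 3.2 MB commit blocks,
// matching the 1:10 commit ratio documented above. Presumably memory is then
// committed one block at a time as the queue grows, rather than up front.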

static void log_edge_queue_summary(const EdgeQueue& edge_queue) {
  log_trace(jfr, system)("EdgeQueue reserved size total: " SIZE_FORMAT " [KB]", edge_queue.reserved_size() / K);
  log_trace(jfr, system)("EdgeQueue edges total: " SIZE_FORMAT, edge_queue.top());
  log_trace(jfr, system)("EdgeQueue liveset total: " SIZE_FORMAT " [KB]", edge_queue.live_set() / K);
  if (edge_queue.reserved_size() > 0) {
    log_trace(jfr, system)("EdgeQueue commit reserve ratio: %f\n",
      ((double)edge_queue.live_set() / (double)edge_queue.reserved_size()));
  }
}

void EmitEventOperation::doit() {
  assert(LeakProfiler::is_running(), "invariant");
  _object_sampler = LeakProfiler::object_sampler();
  assert(_object_sampler != NULL, "invariant");

  _vm_thread = VMThread::vm_thread();
  assert(_vm_thread == Thread::current(), "invariant");
  _vm_thread_data = _vm_thread->trace_data();
  assert(_vm_thread_data != NULL, "invariant");
  assert(_vm_thread->trace_data()->thread_id() == THREAD_TRACE_ID(_vm_thread), "invariant");

  // The VM_Operation::evaluate() which invoked doit()
  // contains a top level ResourceMark

  // save the original markWord for the potential leak objects
  // to be restored on function exit
  ObjectSampleMarker marker;
  if (ObjectSampleCheckpoint::mark(marker, _emit_all) == 0) {
    return;
  }

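  // The edge store accumulates the reference chains (sequences of edges)
  // discovered by the traversal below; the ids it hands out are what the
  // emitted OldObjectSample events reference.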
  EdgeStore edge_store;

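  // Start the time-bounded traversal window. The second argument is, as far
  // as can be told from this file alone, the granularity at which the timer
  // re-checks the clock against _cutoff_ticks, keeping expiry tests cheap.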
  GranularTimer::start(_cutoff_ticks, 1000000);
  if (_cutoff_ticks <= 0) {
    // no chains
    write_events(&edge_store);
    return;
  }

  assert(_cutoff_ticks > 0, "invariant");

  // The bitset used for marking is dimensioned as a function of the heap size
  const MemRegion heap_region = Universe::heap()->reserved_region();
  BitSet mark_bits(heap_region);
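  // The mark bits let the traversal closures record which objects have been
  // visited without touching the objects themselves; the DFS/BFS closures
  // below consult and set these bits as they walk references.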

  // The edge queue is dimensioned as a fraction of the heap size
  const size_t edge_queue_reservation_size = edge_queue_memory_reservation(heap_region);
  EdgeQueue edge_queue(edge_queue_reservation_size, edge_queue_memory_commit_size(edge_queue_reservation_size));

  // The initialize() routines will attempt to reserve and allocate backing storage memory.
  // Failure to accommodate will render root chain processing impossible.
  // As a fallback on failure, just write out the existing samples, flat, without chains.
  if (!(mark_bits.initialize() && edge_queue.initialize())) {
    log_warning(jfr)("Unable to allocate memory for root chain processing");
    write_events(&edge_store);
    return;
  }

  // necessary condition for attempting a root set iteration
  Universe::heap()->ensure_parsability(false);

  RootSetClosure::add_to_queue(&edge_queue);
  if (edge_queue.is_full()) {
    // Pathological case where the roots do not fit in the queue:
    // fall back to a depth-first search, but mark the roots first
    // to avoid walking sideways over them
    DFSClosure::find_leaks_from_root_set(&edge_store, &mark_bits);
  } else {
    BFSClosure bfs(&edge_queue, &edge_store, &mark_bits);
    bfs.process();
  }
  GranularTimer::stop();
  write_events(&edge_store);
  log_edge_queue_summary(edge_queue);
}

int EmitEventOperation::write_events(EdgeStore* edge_store) {
  assert(_object_sampler != NULL, "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread != NULL, "invariant");
  assert(_vm_thread_data != NULL, "invariant");
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");

  // save thread id in preparation for thread local trace data manipulations
  const traceid vmthread_id = _vm_thread_data->thread_id();
  assert(_vm_thread_data->thread_id() == THREAD_TRACE_ID(_vm_thread), "invariant");

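  // When emitting everything, max_jlong makes every live sample pass the age
  // test below; otherwise only samples older than the sampler's last sweep
  // are emitted.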
  const jlong last_sweep = _emit_all ? max_jlong : _object_sampler->last_sweep().value();
  int count = 0;

  for (int i = 0; i < _object_sampler->item_count(); ++i) {
    const ObjectSample* sample = _object_sampler->item_at(i);
    if (sample->is_alive_and_older_than(last_sweep)) {
      write_event(sample, edge_store);
      ++count;
    }
  }

  // restore thread local stack trace and thread id
  _vm_thread_data->set_thread_id(vmthread_id);
  _vm_thread_data->clear_cached_stack_trace();
  assert(_vm_thread_data->thread_id() == THREAD_TRACE_ID(_vm_thread), "invariant");

  if (count > 0) {
    // serialize associated checkpoints
    ObjectSampleCheckpoint::write(edge_store, _emit_all, _vm_thread);
  }
  return count;
}

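// Returns the element count for array objects; -1 denotes a non-array object
// and is stored as-is in the event's arrayElements field below.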
static int array_size(const oop object) {
  assert(object != NULL, "invariant");
  if (object->is_array()) {
    return arrayOop(object)->length();
  }
  return -1;
}

void EmitEventOperation::write_event(const ObjectSample* sample, EdgeStore* edge_store) {
  assert(sample != NULL, "invariant");
  assert(!sample->is_dead(), "invariant");
  assert(edge_store != NULL, "invariant");
  assert(_vm_thread_data != NULL, "invariant");
  const oop* object_addr = sample->object_addr();
  assert(*object_addr != NULL, "invariant");

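  // During chain processing, the mark word of a reachable sample object is
  // overwritten with a pointer to its Edge; the original mark words were
  // saved by the ObjectSampleMarker in doit() and are restored on exit.
  // A NULL edge therefore means the traversal never reached this object
  // within the cutoff.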
  const Edge* edge = (const Edge*)(*object_addr)->mark();
  traceid gc_root_id = 0;
  if (edge == NULL) {
    // In order to dump out a representation of the event,
    // even though the object was unreachable or took too long to reach,
    // we need to register a top level edge for this object
    Edge e(NULL, object_addr);
    edge_store->add_chain(&e, 1);
    edge = (const Edge*)(*object_addr)->mark();
  } else {
    gc_root_id = edge_store->get_root_id(edge);
  }

  assert(edge != NULL, "invariant");
  assert(edge->pointee() == *object_addr, "invariant");
  const traceid object_id = edge_store->get_id(edge);
  assert(object_id != 0, "invariant");

  EventOldObjectSample e(UNTIMED);
  e.set_starttime(GranularTimer::start_time());
  e.set_endtime(GranularTimer::end_time());
  e.set_allocationTime(sample->allocation_time());
  e.set_object(object_id);
  e.set_arrayElements(array_size(*object_addr));
  e.set_root(gc_root_id);

  // Temporarily assign both the stack trace id and the thread id
  // onto the thread local data structure of the VMThread (for the duration
  // of the commit() call). This trick provides a means to override
  // the event generation mechanism by injecting externally provided ids.
  // Here, in particular, it allows us to emit an old object event
  // supplying information from where the actual sampling occurred.
  _vm_thread_data->set_cached_stack_trace_id(sample->stack_trace_id());
  assert(sample->has_thread(), "invariant");
  _vm_thread_data->set_thread_id(sample->thread_id());
  e.commit();
}
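
// A minimal usage sketch (an assumption based on the standard HotSpot
// VM_Operation pattern, not something this file shows): the leak profiler
// would schedule this operation for execution at a safepoint roughly as:
//
//   EmitEventOperation op(cutoff_ticks, emit_all);
//   VMThread::execute(&op); // evaluate() runs doit() in the VMThread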