/*
 * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/mutex.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"

#ifdef SOLARIS
  volatile bool NMT_stack_walkable = false;
#else
  volatile bool NMT_stack_walkable = true;
#endif

volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

MemBaseline MemTracker::_baseline;
Mutex*      MemTracker::_query_lock = NULL;
bool MemTracker::_is_nmt_env_valid = true;

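// Determine the initial tracking level from the NMT_LEVEL_<pid> environment
// variable and initialize the malloc and virtual memory trackers. Falls back
// to NMT_off if the requested value is unrecognized or initialization fails.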
NMT_TrackingLevel MemTracker::init_tracking_level() {
  NMT_TrackingLevel level = NMT_off;
  char buf[64];
  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
  const char *nmt_option = ::getenv(buf);
  if (nmt_option != NULL) {
    if (strcmp(nmt_option, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_option, "detail") == 0) {
      level = NMT_detail;
    } else if (strcmp(nmt_option, "off") != 0) {
      // The option value is invalid
      _is_nmt_env_valid = false;
    }

    // Remove the environment variable to avoid leaking it to child processes
    os::unsetenv(buf);
  }

  // Construct NativeCallStack::EMPTY_STACK. It may be constructed twice,
  // but that is benign: the result is the same either way.
  ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false);

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    level = NMT_off;
  }
  return level;
}

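// Second phase of NMT initialization: finish virtual memory tracker setup and
// allocate the query lock. Shuts NMT down if either step fails.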
void MemTracker::init() {
  NMT_TrackingLevel level = tracking_level();
  if (level >= NMT_summary) {
    if (!VirtualMemoryTracker::late_initialize(level)) {
      shutdown();
      return;
    }
    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    // The allocation can fail under OOM. It is unlikely, but it still has to be handled.
    if (_query_lock == NULL) {
      shutdown();
    }
  }
}

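// Check that the NMT level requested on the launcher side ("=summary", "=detail"
// or "=off") agrees with the level the VM is actually tracking at; returns false
// if they disagree. Any other value marks the NMT environment as invalid.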
bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}

bool MemTracker::verify_nmt_option() {
  return _is_nmt_env_valid;
}

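// Return the original base address of a malloc'd block given the pointer handed
// to the application (the block may carry an NMT malloc header ahead of the
// user data).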
void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}

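// Record the uncommit or release of a virtual memory region. This is a no-op
// unless tracking is at least at summary level.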
void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  switch(_type) {
    case uncommit:
      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
      break;
    case release:
      VirtualMemoryTracker::remove_released_region(addr, size);
      break;
    default:
      ShouldNotReachHere();
  }
}

// Shutdown can only be issued via JCmd, and the NMT JCmd is serialized by a lock.
void MemTracker::shutdown() {
  // NMT can only be shut down to the minimal tracking level if it was ever on.
  if (tracking_level() > NMT_minimal) {
    transition_to(NMT_minimal);
  }
}

bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrade the tracking level: lower _tracking_level first, then transition
    // the individual trackers.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
  } else {
    // Upgrading the tracking level is not supported, and never has been.
    // Allocating and deallocating the malloc tracking structures is not thread
    // safe and leads to inconsistencies unless much coarser locks are added.
  }
  return true;
}

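// Take a fresh baseline and write either a summary or a detail report to the
// given output stream.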
void MemTracker::report(bool summary_only, outputStream* output) {
  assert(output != NULL, "No output stream");
  MemBaseline baseline;
  if (baseline.baseline(summary_only)) {
    if (summary_only) {
      MemSummaryReporter rpt(baseline, output);
      rpt.report();
    } else {
      MemDetailReporter rpt(baseline, output);
      rpt.report();
    }
  }
}

// A walker that gathers malloc site hashtable statistics; the results are
// used for tuning.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // Hash buckets longer than this threshold are aggregated into a single
    // line item in the report.
    report_threshold = 20
  };

 private:
  // Number of allocation sites that have all memory freed
  int   _empty_entries;
  // Total number of allocation sites, including empty sites
  int   _total_entries;
  // Distribution of captured call stack depths
  int   _stack_depth_distribution[NMT_TrackingStackDepth];
  // Hash distribution
  int   _hash_distribution[report_threshold];
  // Number of hash buckets with more entries than the threshold
  int   _bucket_over_threshold;

  // The hash bucket the walker is currently walking
  int   _current_hash_bucket;
  // The length of the current hash bucket
  int   _current_bucket_length;
  // Number of hash buckets that are not empty
  int   _used_buckets;
  // Longest hash bucket length
  int   _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

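  // Invoked once per malloc site entry. Counts empty sites, records the call
  // stack depth, and tracks the length of the hash bucket currently being walked.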
  virtual bool do_malloc_site(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // stack depth distribution
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // hash distribution
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      _current_bucket_length ++;
    } else {
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // Called when the walk has completed, to close out the last bucket.
  void completed() {
    record_bucket_length(_current_bucket_length);
  }

  void report_statistics(outputStream* out) {
    int index;
    out->print_cr("Malloc allocation site table:");
    out->print_cr("\tTotal entries: %d", _total_entries);
    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
    out->print_cr(" ");
    out->print_cr("Hash distribution:");
    if (_used_buckets < MallocSiteTable::hash_buckets()) {
      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
    for (index = 0; index < report_threshold; index ++) {
      if (_hash_distribution[index] != 0) {
        if (index == 0) {
          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
        } else if (index < 9) { // single digit
          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
        } else {
          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
        }
      }
    }
    if (_bucket_over_threshold > 0) {
      out->print_cr(" >%d entries: %d", report_threshold, _bucket_over_threshold);
    }
    out->print_cr("most entries: %d", _longest_bucket_length);
    out->print_cr(" ");
    out->print_cr("Call stack depth distribution:");
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      if (_stack_depth_distribution[index] > 0) {
        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
      }
    }
  }

 private:
  void record_bucket_length(int length) {
    _used_buckets ++;
    if (length <= report_threshold) {
      _hash_distribution[length - 1] ++;
    } else {
      _bucket_over_threshold ++;
    }
    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  }
};

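// Print statistics about the malloc site hashtable (table size, bucket length
// distribution) and the captured call stack depths; used for tuning NMT internals.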
void MemTracker::tuning_statistics(outputStream* out) {
  // NMT statistics
  StatisticsWalker walker;
  MallocSiteTable::walk_malloc_site(&walker);
  walker.completed();

  out->print_cr("Native Memory Tracking Statistics:");
  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  out->print_cr(" ");
  walker.report_statistics(out);
}