/*
 * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "jvm.h"

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "services/threadStackTracker.hpp"
#include "utilities/debug.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/vmError.hpp"

#ifdef _WINDOWS
#include <windows.h>
#endif

volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

MemBaseline MemTracker::_baseline;
bool MemTracker::_is_nmt_env_valid = true;

static const size_t buffer_size = 64;

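// Determine the initial tracking level from the NMT_LEVEL_<pid> environment
// variable set by the launcher ("summary", "detail" or "off"), then remove
// the variable so it is not inherited by child processes. Falls back to
// NMT_off if the malloc or virtual memory trackers cannot be initialized.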
NMT_TrackingLevel MemTracker::init_tracking_level() {
  // The memory type is encoded into the tracking header as a byte field;
  // make sure that we don't overflow it.
  STATIC_ASSERT(mt_number_of_types <= max_jubyte);

  char nmt_env_variable[buffer_size];
  jio_snprintf(nmt_env_variable, sizeof(nmt_env_variable), "NMT_LEVEL_%d", os::current_process_id());
  const char* nmt_env_value;
#ifdef _WINDOWS
  // Read the NMT environment variable from the PEB instead of the CRT
  char value[buffer_size];
  nmt_env_value = GetEnvironmentVariable(nmt_env_variable, value, (DWORD)sizeof(value)) != 0 ? value : NULL;
#else
  nmt_env_value = ::getenv(nmt_env_variable);
#endif
  NMT_TrackingLevel level = NMT_off;
  if (nmt_env_value != NULL) {
    if (strcmp(nmt_env_value, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_env_value, "detail") == 0) {
      level = NMT_detail;
    } else if (strcmp(nmt_env_value, "off") != 0) {
      // The value of the environment variable is invalid
      _is_nmt_env_valid = false;
    }
    // Remove the environment variable to avoid leaking to child processes
    os::unsetenv(nmt_env_variable);
  }

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    level = NMT_off;
  }
  return level;
}

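// Complete the late initialization of the virtual memory and thread stack
// trackers once the VM is far enough along; shut NMT down if that fails.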
void MemTracker::init() {
  NMT_TrackingLevel level = tracking_level();
  if (level >= NMT_summary) {
    if (!VirtualMemoryTracker::late_initialize(level) ||
        !ThreadStackTracker::late_initialize(level)) {
      shutdown();
      return;
    }
  }
}

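// Verify that the tracking level requested through the launcher
// ("=summary", "=detail" or "=off") matches the level that was actually
// initialized; any other value marks the NMT option as invalid.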
bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}

bool MemTracker::verify_nmt_option() {
  return _is_nmt_env_valid;
}

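// Return the actual start of the underlying allocation backing a block
// that carries a malloc tracking header.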
void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}

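// Record the completion of a virtual memory uncommit or release with the
// virtual memory tracker, provided tracking is at least at summary level.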
void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  switch(_type) {
    case uncommit:
      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
      break;
    case release:
      VirtualMemoryTracker::remove_released_region(addr, size);
      break;
    default:
      ShouldNotReachHere();
  }
}


// Shutdown can only be issued via JCmd, and the NMT JCmd is serialized by a lock.
void MemTracker::shutdown() {
  // We can only shut down NMT to the minimal tracking level if it was ever on.
  if (tracking_level() > NMT_minimal) {
    transition_to(NMT_minimal);
  }
}

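// Transition NMT to a lower (or equal) tracking level. Upgrading the level
// is not supported; see the comment in the else branch below.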
bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrade the tracking level; we want to lower the tracking level first.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
    ThreadStackTracker::transition(current_level, level);
  } else {
    // Upgrading the tracking level is not supported and never has been.
    // Allocating and deallocating malloc tracking structures is not thread safe and
    // leads to inconsistencies unless much coarser locks are added.
  }
  return true;
}


static volatile bool g_final_report_did_run = false;
void MemTracker::final_report(outputStream* output) {
  // This function is called during both error reporting and normal VM exit.
  // However, it should only ever run once.  E.g. if the VM crashes after
  // printing the final report during normal VM exit, it should not print
  // the final report again. In addition, it should be guarded from
  // recursive calls in case NMT reporting itself crashes.
  if (Atomic::cmpxchg(&g_final_report_did_run, false, true) == false) {
    NMT_TrackingLevel level = tracking_level();
    if (level >= NMT_summary) {
      report(level == NMT_summary, output);
    }
  }
}

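// Take a fresh baseline and print a summary or detail report to the given
// output stream.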
void MemTracker::report(bool summary_only, outputStream* output) {
  assert(output != NULL, "No output stream");
  MemBaseline baseline;
  if (baseline.baseline(summary_only)) {
    if (summary_only) {
      MemSummaryReporter rpt(baseline, output);
      rpt.report();
    } else {
      MemDetailReporter rpt(baseline, output);
      rpt.report();
      output->print("Metaspace:");
      // The basic metaspace report avoids any locking and should be safe to
      // call at any time.
      MetaspaceUtils::print_basic_report(output, K);
    }
  }
}

// This is a walker that gathers malloc site hashtable statistics;
// the result is used for tuning.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // aggregates statistics over this threshold into one
    // line item.
    report_threshold = 20
  };

 private:
  // Number of allocation sites that have all memory freed
  int   _empty_entries;
  // Total number of allocation sites, including empty sites
  int   _total_entries;
  // Distribution of captured call stack depths
  int   _stack_depth_distribution[NMT_TrackingStackDepth];
  // Hash distribution
  int   _hash_distribution[report_threshold];
  // Number of hash buckets that have entries over the threshold
  int   _bucket_over_threshold;

  // The hash bucket the walker is currently walking
  int   _current_hash_bucket;
  // The length of the current hash bucket
  int   _current_bucket_length;
  // Number of hash buckets that are not empty
  int   _used_buckets;
  // Longest hash bucket length
  int   _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

  virtual bool do_malloc_site(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // stack depth distribution
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // hash distribution
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      _current_bucket_length ++;
    } else {
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // walk completed
  void completed() {
    record_bucket_length(_current_bucket_length);
  }

  void report_statistics(outputStream* out) {
    int index;
    out->print_cr("Malloc allocation site table:");
    out->print_cr("\tTotal entries: %d", _total_entries);
    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
    out->print_cr(" ");
    out->print_cr("Hash distribution:");
    if (_used_buckets < MallocSiteTable::hash_buckets()) {
      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
    for (index = 0; index < report_threshold; index ++) {
      if (_hash_distribution[index] != 0) {
        if (index == 0) {
          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
        } else if (index < 9) { // single digit
          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
        } else {
          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
        }
      }
    }
    if (_bucket_over_threshold > 0) {
      out->print_cr(" >%d entries: %d", report_threshold,  _bucket_over_threshold);
    }
    out->print_cr("most entries: %d", _longest_bucket_length);
    out->print_cr(" ");
    out->print_cr("Call stack depth distribution:");
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      if (_stack_depth_distribution[index] > 0) {
        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
      }
    }
  }

 private:
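  // Account for a finished hash bucket: update the length distribution,
  // the over-threshold count and the longest bucket seen so far.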
  void record_bucket_length(int length) {
    _used_buckets ++;
    if (length <= report_threshold) {
      _hash_distribution[length - 1] ++;
    } else {
      _bucket_over_threshold ++;
    }
    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  }
};


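// Walk the malloc site table and print statistics about its occupancy
// (bucket lengths, captured stack depths); used to tune the table size
// and the tracking stack depth.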
void MemTracker::tuning_statistics(outputStream* out) {
  // NMT statistics
  StatisticsWalker walker;
  MallocSiteTable::walk_malloc_site(&walker);
  walker.completed();

  out->print_cr("Native Memory Tracking Statistics:");
  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  out->print_cr(" ");
  walker.report_statistics(out);
}