/*
 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "jvm.h"

#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/vmError.hpp"

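// Whether native call stacks can be walked at this point. On Solaris this
// starts out false, since stack walking during early VM bootstrap appears to
// be unsafe there; it is flipped to true later in the startup sequence.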
#ifdef SOLARIS
  volatile bool NMT_stack_walkable = false;
#else
  volatile bool NMT_stack_walkable = true;
#endif

volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

MemBaseline MemTracker::_baseline;
Mutex*      MemTracker::_query_lock = NULL;
bool MemTracker::_is_nmt_env_valid = true;


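// Pick up the tracking level requested by the launcher. The launcher hands
// the level to the VM through a per-process environment variable; e.g. with
// -XX:NativeMemoryTracking=summary and pid 12345 (hypothetical pid), the
// variable would be
//
//   NMT_LEVEL_12345=summary
//
// The variable is read and then unset so it does not leak into child
// processes.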
NMT_TrackingLevel MemTracker::init_tracking_level() {
  NMT_TrackingLevel level = NMT_off;
  char buf[64];
  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
  const char *nmt_option = ::getenv(buf);
  if (nmt_option != NULL) {
    if (strcmp(nmt_option, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_option, "detail") == 0) {
      level = NMT_detail;
    } else if (strcmp(nmt_option, "off") != 0) {
      // The option value is invalid
      _is_nmt_env_valid = false;
    }

    // Remove the environment variable to avoid leaking it to child processes
    os::unsetenv(buf);
  }

  // Construct NativeCallStack::EMPTY_STACK. It may get constructed twice,
  // but that is benign: the results are the same.
  ::new ((void*)&NativeCallStack::EMPTY_STACK) NativeCallStack(0, false);

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    level = NMT_off;
  }
  return level;
}

void MemTracker::init() {
  NMT_TrackingLevel level = tracking_level();
  if (level >= NMT_summary) {
    if (!VirtualMemoryTracker::late_initialize(level)) {
      shutdown();
      return;
    }
    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    // Out of memory already. Unlikely, but we still have to handle it.
    if (_query_lock == NULL) {
      shutdown();
    }
  }
}

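// Called on behalf of the launcher to verify that the requested tracking
// level (passed as a string such as "=summary", "=detail" or "=off")
// matches the level NMT actually initialized with. Returns false on a
// mismatch; an unrecognized value only marks the NMT environment as invalid.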
bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}

bool MemTracker::verify_nmt_option() {
  return _is_nmt_env_valid;
}

void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}

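// The back half of the two-step protocol for tracking virtual memory
// operations: a Tracker is created before the OS call and record() is
// invoked with the affected range once the call succeeds. Only uncommit
// and release events arrive here; reserve and commit operations go through
// the MemTracker::record_virtual_memory_* entry points instead.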
void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  switch(_type) {
    case uncommit:
      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
      break;
    case release:
      VirtualMemoryTracker::remove_released_region(addr, size);
      break;
    default:
      ShouldNotReachHere();
  }
}


// Shutdown can only be issued via JCmd, and the NMT JCmd is serialized by a lock
void MemTracker::shutdown() {
  // NMT can only be shut down to the minimal tracking level if it was ever on.
  if (tracking_level() > NMT_minimal) {
    transition_to(NMT_minimal);
  }
}

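// Transitions can keep or lower the tracking level, never raise it. The only
// caller that lowers the level is shutdown() above, which is serialized, so
// downgrades cannot race each other.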
bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrading the tracking level: lower _tracking_level first, so that
    // concurrent threads stop recording before the trackers transition.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);
  } else {
    // Upgrading the tracking level is not supported and never has been.
    // Allocating and deallocating malloc tracking structures is not thread
    // safe and would lead to inconsistencies unless much coarser locks were
    // added.
  }
  return true;
}

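// Produce a summary or detail report against a freshly collected baseline.
// In detail mode, metaspace statistics are appended via a VM operation that
// needs a safepoint, hence the assert below that no fatal error is in
// progress.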
void MemTracker::report(bool summary_only, outputStream* output) {
  assert(output != NULL, "No output stream");
  MemBaseline baseline;
  if (baseline.baseline(summary_only)) {
    if (summary_only) {
      MemSummaryReporter rpt(baseline, output);
      rpt.report();
    } else {
      MemDetailReporter rpt(baseline, output);
      rpt.report();
      output->print("Metaspace:");
      // Metadata reporting requires a safepoint, so avoid it if the VM is not in a good state.
      assert(!VMError::fatal_error_in_progress(), "Do not report metadata in error report");
      VM_PrintMetadata vmop(output, K,
          MetaspaceUtils::rf_show_loaders |
          MetaspaceUtils::rf_break_down_by_spacetype);
      VMThread::execute(&vmop);
    }
  }
}

// A walker that gathers malloc site hashtable statistics;
// the results are used for tuning.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // Bucket lengths over this threshold are aggregated
    // into one line item.
    report_threshold = 20
  };

 private:
  // Number of allocation sites whose memory has all been freed
  int   _empty_entries;
  // Total number of allocation sites, including empty sites
  int   _total_entries;
  // Distribution of captured call stack depths
  int   _stack_depth_distribution[NMT_TrackingStackDepth];
  // Hash distribution
  int   _hash_distribution[report_threshold];
  // Number of hash buckets that have entries over the threshold
  int   _bucket_over_threshold;

  // The hash bucket the walker is currently in
  int   _current_hash_bucket;
  // The length of the current hash bucket
  int   _current_bucket_length;
  // Number of hash buckets that are not empty
  int   _used_buckets;
  // Longest hash bucket length
  int   _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

  virtual bool do_malloc_site(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // stack depth distribution
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // hash distribution
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      _current_bucket_length ++;
    } else {
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // walk completed
  void completed() {
    record_bucket_length(_current_bucket_length);
  }

  void report_statistics(outputStream* out) {
    int index;
    out->print_cr("Malloc allocation site table:");
    out->print_cr("\tTotal entries: %d", _total_entries);
    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
    out->print_cr(" ");
    out->print_cr("Hash distribution:");
    if (_used_buckets < MallocSiteTable::hash_buckets()) {
      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
    for (index = 0; index < report_threshold; index ++) {
      if (_hash_distribution[index] != 0) {
        if (index == 0) {
          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
        } else if (index < 9) { // single digit
          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
        } else {
          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
        }
      }
    }
    if (_bucket_over_threshold > 0) {
      out->print_cr(" >%d entries: %d", report_threshold, _bucket_over_threshold);
    }
    out->print_cr("most entries: %d", _longest_bucket_length);
    out->print_cr(" ");
    out->print_cr("Call stack depth distribution:");
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      if (_stack_depth_distribution[index] > 0) {
        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
      }
    }
  }

 private:
  void record_bucket_length(int length) {
    _used_buckets ++;
    if (length <= report_threshold) {
      _hash_distribution[length - 1] ++;
    } else {
      _bucket_over_threshold ++;
    }
    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  }
};


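// Thread stacks are tracked both in the malloc summary (as a per-thread
// bookkeeping entry of size 0) and as virtual memory regions. Since a stack
// need not be page aligned, only its inner page-aligned part is recorded.
// For example, assuming 4K pages (hypothetical addresses), a stack spanning
// [0x10037500, 0x10043200) is recorded as [0x10038000, 0x10043000).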
void MemTracker::record_thread_stack(void* addr, size_t size) {
  if (tracking_level() < NMT_summary) return;
  if (addr != NULL) {
    // Use the thread stack malloc slot to keep count of the number of threads
    MallocMemorySummary::record_malloc(0, mtThreadStack);
    // NMT assumes thread stacks are page aligned, but that does not have to
    // be the case. So, for the purpose of tracking thread stacks, just use
    // the inner page-aligned part of the stack.
    const os::range_t inner = os::inner_page_aligned_boundaries((address)addr, size);
    record_virtual_memory_reserve(inner.addr, inner.size, CALLER_PC, mtThreadStack);
  }
}

void MemTracker::release_thread_stack(void* addr, size_t size) {
  if (tracking_level() < NMT_summary) return;
  if (addr != NULL) {
    // Use the thread stack malloc slot to keep count of the number of threads
    MallocMemorySummary::record_free(0, mtThreadStack);
    ThreadCritical tc;
    // Re-check under the lock: tracking may have been shut down concurrently.
    if (tracking_level() < NMT_summary) return;
    // NMT assumes thread stacks are page aligned, but that does not have to
    // be the case. So, for the purpose of tracking thread stacks, just use
    // the inner page-aligned part of the stack.
    const os::range_t inner = os::inner_page_aligned_boundaries((address)addr, size);
    VirtualMemoryTracker::remove_released_region(inner.addr, inner.size);
  }
}

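// Print statistics about the malloc site hashtable itself (bucket occupancy,
// chain lengths, captured stack depths) to help tune the table size. This is
// typically reached through the "statistics" option of the VM.native_memory
// jcmd.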
void MemTracker::tuning_statistics(outputStream* out) {
  // NMT statistics
  StatisticsWalker walker;
  MallocSiteTable::walk_malloc_site(&walker);
  walker.completed();

  out->print_cr("Native Memory Tracking Statistics:");
  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  out->print_cr(" ");
  walker.report_statistics(out);
}