/*
 * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/mutex.hpp"
#include "services/memBaseline.hpp"
#include "services/memReporter.hpp"
#include "services/mallocTracker.inline.hpp"
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"

#ifdef SOLARIS
  volatile bool NMT_stack_walkable = false;
#else
  volatile bool NMT_stack_walkable = true;
#endif

volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;

NativeCallStack emptyStack(0, false);

MemBaseline MemTracker::_baseline;
Mutex*      MemTracker::_query_lock = NULL;
bool MemTracker::_is_nmt_env_valid = true;

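// Determine the initial tracking level from the NMT_LEVEL_<pid> environment
// variable (typically set up by the launcher from the -XX:NativeMemoryTracking
// option) and initialize the malloc and virtual memory trackers accordingly.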
NMT_TrackingLevel MemTracker::init_tracking_level() {
  NMT_TrackingLevel level = NMT_off;
  char buf[64];
  char nmt_option[64];
  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
  if (os::getenv(buf, nmt_option, sizeof(nmt_option))) {
    if (strcmp(nmt_option, "summary") == 0) {
      level = NMT_summary;
    } else if (strcmp(nmt_option, "detail") == 0) {
#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
      level = NMT_detail;
#else
      level = NMT_summary;
#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    } else if (strcmp(nmt_option, "off") != 0) {
      // The option value is invalid
      _is_nmt_env_valid = false;
    }

    // Remove the environment variable to avoid leaking it to child processes
    os::unsetenv(buf);
  }

  if (!MallocTracker::initialize(level) ||
      !VirtualMemoryTracker::initialize(level)) {
    level = NMT_off;
  }
  return level;
}

void MemTracker::init() {
  if (tracking_level() >= NMT_summary) {
    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    // Already OOM. It is unlikely, but we still have to handle it.
    if (_query_lock == NULL) {
      shutdown();
    }
  }
}

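// Verify that the tracking level established at startup matches the option
// value handed back by the launcher (e.g. "=summary", "=detail" or "=off");
// returns false on a mismatch.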
bool MemTracker::check_launcher_nmt_support(const char* value) {
  if (strcmp(value, "=detail") == 0) {
#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    jio_fprintf(defaultStream::error_stream(),
      "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
#else
    if (MemTracker::tracking_level() != NMT_detail) {
      return false;
    }
#endif
  } else if (strcmp(value, "=summary") == 0) {
    if (MemTracker::tracking_level() != NMT_summary) {
      return false;
    }
  } else if (strcmp(value, "=off") == 0) {
    if (MemTracker::tracking_level() != NMT_off) {
      return false;
    }
  } else {
    _is_nmt_env_valid = false;
  }

  return true;
}
 116 
 117 bool MemTracker::verify_nmt_option() {
 118   return _is_nmt_env_valid;
 119 }
 120 
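// Given a block handed out to NMT clients, return the base address of the
// underlying allocation (the pointer originally obtained from the C-heap
// allocator).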
void* MemTracker::malloc_base(void* memblock) {
  return MallocTracker::get_base(memblock);
}

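// Complete a pending uncommit/release transaction against the virtual memory
// tracker. Records are dropped when tracking is below summary level.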
void Tracker::record(address addr, size_t size) {
  if (MemTracker::tracking_level() < NMT_summary) return;
  switch(_type) {
    case uncommit:
      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
      break;
    case release:
      VirtualMemoryTracker::remove_released_region(addr, size);
      break;
    default:
      ShouldNotReachHere();
  }
}


// Shutdown can only be issued via JCmd, and NMT JCmd is serialized by a lock.
void MemTracker::shutdown() {
  // We can only shut down NMT to the minimal tracking level if it has
  // ever been turned on.
  if (tracking_level() > NMT_minimal) {
    transition_to(NMT_minimal);
  }
}

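// Transition NMT to the requested tracking level. Ordering matters: when
// downgrading, the global level is lowered first so that new requests stop
// recording before the trackers adjust their data; when upgrading, the
// trackers are transitioned first before the new level becomes visible.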
bool MemTracker::transition_to(NMT_TrackingLevel level) {
  NMT_TrackingLevel current_level = tracking_level();

  if (current_level == level) {
    return true;
  } else if (current_level > level) {
    // Downgrading: lower the tracking level first.
    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);

    if (level == NMT_minimal) _baseline.reset();
  } else {
    VirtualMemoryTracker::transition(current_level, level);
    MallocTracker::transition(current_level, level);

    _tracking_level = level;
    // Make _tracking_level visible immediately.
    OrderAccess::fence();
  }

  return true;
}

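// Emit the final NMT report. Only meaningful at summary level or above;
// NMT is shut down afterwards because the data is no longer kept accurate.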
void MemTracker::final_report(outputStream* output) {
  assert(output != NULL, "No output stream");
  if (tracking_level() >= NMT_summary) {
    MallocMemorySnapshot* malloc_memory_snapshot =
      MallocMemorySummary::as_snapshot();
    malloc_memory_snapshot->make_adjustment();

    VirtualMemorySnapshot* virtual_memory_snapshot =
      VirtualMemorySummary::as_snapshot();

    MemSummaryReporter rptr(malloc_memory_snapshot,
      virtual_memory_snapshot, output);
    rptr.report();
    // Shutdown NMT; the data is no longer accurate.
    shutdown();
  }
}

// This is a walker that gathers malloc site hashtable statistics;
// the results are used for tuning.
class StatisticsWalker : public MallocSiteWalker {
 private:
  enum Threshold {
    // Aggregate statistics over this threshold into one
    // line item.
    report_threshold = 20
  };

 private:
  // Number of allocation sites that have had all of their memory freed
  int   _empty_entries;
  // Total number of allocation sites, including empty sites
  int   _total_entries;
  // Distribution of captured call stack depths
  int   _stack_depth_distribution[NMT_TrackingStackDepth];
  // Hash distribution
  int   _hash_distribution[report_threshold];
  // Number of hash buckets that have entries over the threshold
  int   _bucket_over_threshold;

  // The hash bucket that the walker is currently walking
  int   _current_hash_bucket;
  // The length of the current hash bucket
  int   _current_bucket_length;
  // Number of hash buckets that are not empty
  int   _used_buckets;
  // Longest hash bucket length
  int   _longest_bucket_length;

 public:
  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
    int index = 0;
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      _stack_depth_distribution[index] = 0;
    }
    for (index = 0; index < report_threshold; index ++) {
      _hash_distribution[index] = 0;
    }
    _bucket_over_threshold = 0;
    _longest_bucket_length = 0;
    _current_hash_bucket = -1;
    _current_bucket_length = 0;
    _used_buckets = 0;
  }

  virtual bool do_malloc_site(const MallocSite* e) {
    if (e->size() == 0) _empty_entries ++;
    _total_entries ++;

    // stack depth distribution
    int frames = e->call_stack()->frames();
    _stack_depth_distribution[frames - 1] ++;

    // hash distribution
    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
    if (_current_hash_bucket == -1) {
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    } else if (_current_hash_bucket == hash_bucket) {
      _current_bucket_length ++;
    } else {
      record_bucket_length(_current_bucket_length);
      _current_hash_bucket = hash_bucket;
      _current_bucket_length = 1;
    }
    return true;
  }

  // walk completed
  void completed() {
    record_bucket_length(_current_bucket_length);
  }

  void report_statistics(outputStream* out) {
    int index;
    out->print_cr("Malloc allocation site table:");
    out->print_cr("\tTotal entries: %d", _total_entries);
    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
    out->print_cr(" ");
    out->print_cr("Hash distribution:");
    if (_used_buckets < MallocSiteTable::hash_buckets()) {
      out->print_cr("empty bucket: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
    }
    for (index = 0; index < report_threshold; index ++) {
      if (_hash_distribution[index] != 0) {
        if (index == 0) {
          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
        } else if (index < 9) { // single digit
          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
        } else {
          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
        }
      }
    }
    if (_bucket_over_threshold > 0) {
      out->print_cr(" >%d entries: %d", report_threshold,  _bucket_over_threshold);
    }
    out->print_cr("most entries: %d", _longest_bucket_length);
    out->print_cr(" ");
    out->print_cr("Call stack depth distribution:");
    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
      if (_stack_depth_distribution[index] > 0) {
        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
      }
    }
  }

 private:
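  // Account for one completed hash bucket: update the used-bucket count, the
  // per-length distribution (or the over-threshold counter), and the longest
  // bucket length seen so far.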
  void record_bucket_length(int length) {
    _used_buckets ++;
    if (length <= report_threshold) {
      _hash_distribution[length - 1] ++;
    } else {
      _bucket_over_threshold ++;
    }
    _longest_bucket_length = MAX2(_longest_bucket_length, length);
  }
};


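// Walk the malloc site table and print statistics that can be used to tune
// the table size and the tracking stack depth.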
void MemTracker::tuning_statistics(outputStream* out) {
  // NMT statistics
  StatisticsWalker walker;
  MallocSiteTable::walk_malloc_site(&walker);
  walker.completed();

  out->print_cr("Native Memory Tracking Statistics:");
  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
  out->print_cr(" ");
  walker.report_statistics(out);
}