/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP

#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"


#if !INCLUDE_NMT

#define CURRENT_PC   NativeCallStack::empty_stack()
#define CALLER_PC    NativeCallStack::empty_stack()

class Tracker : public StackObj {
 public:
  Tracker() { }
  void record(address addr, size_t size) { }
};

class MemTracker : AllStatic {
 public:
  static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
  static inline void shutdown() { }
  static inline void init() { }
  static bool check_launcher_nmt_support(const char* value) { return true; }
  static bool verify_nmt_option() { return true; }

  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
  static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
  static inline size_t malloc_header_size(void* memblock) { return 0; }
  static inline void* malloc_base(void* memblock) { return memblock; }
  static inline void* record_free(void* memblock) { return memblock; }

  static inline void record_new_arena(MEMFLAGS flag) { }
  static inline void record_arena_free(MEMFLAGS flag) { }
  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
                       MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
  static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
  static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); }
  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
  static inline void record_thread_stack(void* addr, size_t size) { }
  static inline void release_thread_stack(void* addr, size_t size) { }

  static void final_report(outputStream*) { }
  static void error_report(outputStream*) { }
};

#else

#include "runtime/atomic.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "services/mallocTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

extern volatile bool NMT_stack_walkable;

#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
                    NativeCallStack(0, true) : NativeCallStack::empty_stack())
#define CALLER_PC  ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
                    NativeCallStack(1, true) : NativeCallStack::empty_stack())
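
// Illustrative note on these macros (a sketch, not part of the tracking logic):
// CURRENT_PC captures the native stack starting at the macro's own call site,
// while CALLER_PC skips one extra frame, so a thin wrapper can attribute an
// allocation to *its* caller. Assuming the os::malloc overload that accepts a
// NativeCallStack, a hypothetical wrapper could look like:
//
//   void* my_alloc(size_t size, MEMFLAGS flags) {     // hypothetical helper
//     return os::malloc(size, flags, CALLER_PC);      // charge my_alloc's caller
//   }
//
// Below NMT_detail both macros expand to the empty stack, so no stack walk occurs.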

class MemBaseline;
class Mutex;

// Tracker guards the 'release' semantics of a virtual memory operation: it prevents
// another thread from obtaining and recording a region that the current thread has
// just 'released' before the current thread has recorded that release (see the usage
// sketch after the class).
class Tracker : public StackObj {
 public:
  enum TrackerType {
     uncommit,
     release
  };

 public:
  Tracker(enum TrackerType type) : _type(type) { }
  void record(address addr, size_t size);
 private:
  enum TrackerType  _type;
  // Virtual memory tracking data structures are protected by ThreadCritical lock.
  ThreadCritical    _tc;
};
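
// A minimal usage sketch (hypothetical caller, for illustration only): construct
// the Tracker before the raw operation so that its ThreadCritical section covers
// both the release and the recording; pd_release_memory is an assumed platform
// primitive, not part of this header.
//
//   Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
//   if (pd_release_memory(addr, size)) {
//     tkr.record((address)addr, size);
//   }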

class MemTracker : AllStatic {
 public:
  static inline NMT_TrackingLevel tracking_level() {
    if (_tracking_level == NMT_unknown) {
      // No fencing is needed here, since the JVM is still in
      // single-threaded mode at this point.
      _tracking_level = init_tracking_level();
      _cmdline_tracking_level = _tracking_level;
    }
    return _tracking_level;
  }
  // Late initialization, for work that cannot be done in
  // init_tracking_level(), which must NOT malloc any memory.
  static void init();

  // Shut down native memory tracking.
  static void shutdown();

  // Verify the native memory tracking command line option.
  // This check allows the JVM to detect whether a compatible
  // launcher is used.
  // If an incompatible launcher is used, NMT may not be able
  // to start, even if it is enabled on the command line.
  // A warning message should be printed when this is detected.
  static bool check_launcher_nmt_support(const char* value);

  // Check the native memory tracking environment variable value
  // passed by the launcher.
  // The launcher is only obligated to pass the native memory
  // tracking option value, not to validate it, and it may strip
  // the option from the command line once it has set up the
  // environment variable, so NMT has to catch a bad value here.
  static bool verify_nmt_option();

  // Transition the tracking level to the specified level.
  static bool transition_to(NMT_TrackingLevel level);

  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) {
    return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
  }

  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return MallocTracker::malloc_header_size(level);
  }

  static size_t malloc_header_size(void* memblock) {
    if (tracking_level() != NMT_off) {
      return MallocTracker::get_header_size(memblock);
    }
    return 0;
  }

  // Returns the malloc base address, which is the starting address of the
  // malloc tracking header if tracking is enabled; otherwise it returns the
  // same address that was passed in.
  static void* malloc_base(void* memblock);

  // Record a malloc free and return the malloc base address.
  static inline void* record_free(void* memblock) {
    return MallocTracker::record_free(memblock);
  }
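
  // A minimal sketch of how the malloc hooks above could be paired around a raw
  // allocator (illustration only, assuming a plain ::malloc/::free backend): the
  // raw block is padded by malloc_header_size(), record_malloc() returns the
  // user-visible pointer, and record_free() recovers the base pointer that must
  // be handed back to the raw free.
  //
  //   NMT_TrackingLevel level  = MemTracker::tracking_level();
  //   size_t            header = MemTracker::malloc_header_size(level);
  //   void* base = ::malloc(size + header);                       // raw allocation
  //   void* user = MemTracker::record_malloc(base, size, flags, CALLER_PC, level);
  //   ...
  //   ::free(MemTracker::record_free(user));                      // raw free of the base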

  // Record the creation of an arena.
  static inline void record_new_arena(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_new_arena(flag);
  }

  // Record the destruction of an arena.
  static inline void record_arena_free(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_free(flag);
  }

  // Record an arena size change. Arena size is the total size of all
  // arena chunks that back the arena.
  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_size_change(diff, flag);
  }
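
  // Illustrative arena lifecycle (hypothetical caller, for example only): an
  // arena owner would report its creation, the growth and shrink of its backing
  // chunks, and finally its destruction, all under the same MEMFLAGS category.
  //
  //   MemTracker::record_new_arena(mtCompiler);
  //   MemTracker::record_arena_size_change((int)chunk_size, mtCompiler);   // chunk added
  //   MemTracker::record_arena_size_change(-(int)chunk_size, mtCompiler);  // chunk removed
  //   MemTracker::record_arena_free(mtCompiler);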

  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck to avoid potential racing during NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
    }
  }

  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size,
        stack, flag, true);
    }
  }

  static inline void record_virtual_memory_commit(void* addr, size_t size,
    const NativeCallStack& stack) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
    }
  }
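
  // A minimal sketch of the intended call pattern for the reserve/commit hooks
  // (hypothetical caller; pd_reserve_memory is an assumed raw primitive): record
  // only after the raw operation succeeds, passing CALLER_PC so detail tracking
  // can attribute the region to the requesting code.
  //
  //   char* base = pd_reserve_memory(size);
  //   if (base != NULL) {
  //     MemTracker::record_virtual_memory_reserve((void*)base, size, CALLER_PC, mtInternal);
  //   }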

  static inline Tracker get_virtual_memory_uncommit_tracker() {
    assert(tracking_level() >= NMT_summary, "Check by caller");
    return Tracker(Tracker::uncommit);
  }

  static inline Tracker get_virtual_memory_release_tracker() {
    assert(tracking_level() >= NMT_summary, "Check by caller");
    return Tracker(Tracker::release);
  }

  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
    }
  }

  static inline void record_thread_stack(void* addr, size_t size) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      // Uses the thread stack malloc slot for bookkeeping the number of threads.
      MallocMemorySummary::record_malloc(0, mtThreadStack);
      record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
    }
  }

  static inline void release_thread_stack(void* addr, size_t size) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      // Uses the thread stack malloc slot for bookkeeping the number of threads.
      MallocMemorySummary::record_free(0, mtThreadStack);
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::remove_released_region((address)addr, size);
    }
  }

  // Query lock is used to synchronize access to tracking data.
  // So far, it is only used by the JCmd query, but it may be used
  // by other tools.
  static inline Mutex* query_lock() { return _query_lock; }

  // Make a final report or a report for the hs_err file.
  static void error_report(outputStream* output) {
    if (tracking_level() >= NMT_summary) {
      report(true, output);  // just print the summary for the error case
    }
  }

  static void final_report(outputStream* output) {
    NMT_TrackingLevel level = tracking_level();
    if (level >= NMT_summary) {
      report(level == NMT_summary, output);
    }
  }


  // Stored baseline
  static inline MemBaseline& get_baseline() {
    return _baseline;
  }

  static NMT_TrackingLevel cmdline_tracking_level() {
    return _cmdline_tracking_level;
  }

  static void tuning_statistics(outputStream* out);

 private:
  static NMT_TrackingLevel init_tracking_level();
  static void report(bool summary_only, outputStream* output);

 private:
  // Tracking level
  static volatile NMT_TrackingLevel   _tracking_level;
  // Whether the NMT option value passed by the launcher through
  // the environment variable is valid
  static bool                         _is_nmt_env_valid;
  // Command line tracking level
  static NMT_TrackingLevel            _cmdline_tracking_level;
  // Stored baseline
  static MemBaseline      _baseline;
  // Query lock
  static Mutex*           _query_lock;
};

#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP