/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP 26 #define SHARE_VM_SERVICES_MEM_TRACKER_HPP 27 28 #include "services/nmtCommon.hpp" 29 #include "utilities/nativeCallStack.hpp" 30 31 32 #if !INCLUDE_NMT 33 34 #define CURRENT_PC NativeCallStack::EMPTY_STACK 35 #define CALLER_PC NativeCallStack::EMPTY_STACK 36 37 class Tracker : public StackObj { 38 public: 39 enum TrackerType { 40 uncommit, 41 release 42 }; 43 Tracker(enum TrackerType type) : _type(type) { } 44 void record(address addr, size_t size); 45 private: 46 enum TrackerType _type; 47 }; 48 49 class MemTracker : AllStatic { 50 public: 51 static inline NMT_TrackingLevel tracking_level() { return NMT_off; } 52 static inline void shutdown() { } 53 static inline void init() { } 54 static bool check_launcher_nmt_support(const char* value) { return true; } 55 static bool verify_nmt_option() { return true; } 56 57 static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag, 58 const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; } 59 static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; } 60 static inline size_t malloc_header_size(void* memblock) { return 0; } 61 static inline void* malloc_base(void* memblock) { return memblock; } 62 static inline void* record_free(void* memblock) { return memblock; } 63 64 static inline void record_new_arena(MEMFLAGS flag) { } 65 static inline void record_arena_free(MEMFLAGS flag) { } 66 static inline void record_arena_size_change(int diff, MEMFLAGS flag) { } 67 static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack, 68 MEMFLAGS flag = mtNone) { } 69 static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size, 70 const NativeCallStack& stack, MEMFLAGS flag = mtNone) { } 71 static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { } 72 static inline void record_virtual_memory_type(void* addr, 
MEMFLAGS flag) { } 73 static inline void record_thread_stack(void* addr, size_t size) { } 74 static inline void release_thread_stack(void* addr, size_t size) { } 75 76 static void final_report(outputStream*) { } 77 static void error_report(outputStream*) { } 78 }; 79 80 #else 81 82 #include "runtime/threadCritical.hpp" 83 #include "services/mallocTracker.hpp" 84 #include "services/virtualMemoryTracker.hpp" 85 86 extern volatile bool NMT_stack_walkable; 87 88 #define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ 89 NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK) 90 #define CALLER_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ 91 NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK) 92 93 class MemBaseline; 94 class Mutex; 95 96 // Tracker is used for guarding 'release' semantics of virtual memory operation, to avoid 97 // the other thread obtains and records the same region that is just 'released' by current 98 // thread but before it can record the operation. 99 class Tracker : public StackObj { 100 public: 101 enum TrackerType { 102 uncommit, 103 release 104 }; 105 106 public: 107 Tracker(enum TrackerType type) : _type(type) { } 108 void record(address addr, size_t size); 109 private: 110 enum TrackerType _type; 111 // Virtual memory tracking data structures are protected by ThreadCritical lock. 112 ThreadCritical _tc; 113 }; 114 115 class MemTracker : AllStatic { 116 public: 117 static inline NMT_TrackingLevel tracking_level() { 118 if (_tracking_level == NMT_unknown) { 119 // No fencing is needed here, since JVM is in single-threaded 120 // mode. 121 _tracking_level = init_tracking_level(); 122 _cmdline_tracking_level = _tracking_level; 123 } 124 return _tracking_level; 125 } 126 127 // A late initialization, for the stuff(s) can not be 128 // done in init_tracking_level(), which can NOT malloc 129 // any memory. 
130 static void init(); 131 132 // Shutdown native memory tracking 133 static void shutdown(); 134 135 // Verify native memory tracking command line option. 136 // This check allows JVM to detect if compatible launcher 137 // is used. 138 // If an incompatible launcher is used, NMT may not be 139 // able to start, even it is enabled by command line option. 140 // A warning message should be given if it is encountered. 141 static bool check_launcher_nmt_support(const char* value); 142 143 // This method checks native memory tracking environment 144 // variable value passed by launcher. 145 // Launcher only obligated to pass native memory tracking 146 // option value, but not obligated to validate the value, 147 // and launcher has option to discard native memory tracking 148 // option from the command line once it sets up the environment 149 // variable, so NMT has to catch the bad value here. 150 static bool verify_nmt_option(); 151 152 // Transition the tracking level to specified level 153 static bool transition_to(NMT_TrackingLevel level); 154 155 static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag, 156 const NativeCallStack& stack, NMT_TrackingLevel level) { 157 return MallocTracker::record_malloc(mem_base, size, flag, stack, level); 158 } 159 160 static inline size_t malloc_header_size(NMT_TrackingLevel level) { 161 return MallocTracker::malloc_header_size(level); 162 } 163 164 static size_t malloc_header_size(void* memblock) { 165 if (tracking_level() != NMT_off) { 166 return MallocTracker::get_header_size(memblock); 167 } 168 return 0; 169 } 170 171 // To malloc base address, which is the starting address 172 // of malloc tracking header if tracking is enabled. 173 // Otherwise, it returns the same address. 
174 static void* malloc_base(void* memblock); 175 176 // Record malloc free and return malloc base address 177 static inline void* record_free(void* memblock) { 178 return MallocTracker::record_free(memblock); 179 } 180 181 182 // Record creation of an arena 183 static inline void record_new_arena(MEMFLAGS flag) { 184 if (tracking_level() < NMT_summary) return; 185 MallocTracker::record_new_arena(flag); 186 } 187 188 // Record destruction of an arena 189 static inline void record_arena_free(MEMFLAGS flag) { 190 if (tracking_level() < NMT_summary) return; 191 MallocTracker::record_arena_free(flag); 192 } 193 194 // Record arena size change. Arena size is the size of all arena 195 // chuncks that backing up the arena. 196 static inline void record_arena_size_change(int diff, MEMFLAGS flag) { 197 if (tracking_level() < NMT_summary) return; 198 MallocTracker::record_arena_size_change(diff, flag); 199 } 200 201 static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack, 202 MEMFLAGS flag = mtNone) { 203 if (tracking_level() < NMT_summary) return; 204 if (addr != NULL) { 205 ThreadCritical tc; 206 // Recheck to avoid potential racing during NMT shutdown 207 if (tracking_level() < NMT_summary) return; 208 VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag); 209 } 210 } 211 212 static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size, 213 const NativeCallStack& stack, MEMFLAGS flag = mtNone) { 214 if (tracking_level() < NMT_summary) return; 215 if (addr != NULL) { 216 ThreadCritical tc; 217 if (tracking_level() < NMT_summary) return; 218 VirtualMemoryTracker::add_reserved_region((address)addr, size, 219 stack, flag, true); 220 VirtualMemoryTracker::add_committed_region((address)addr, size, stack); 221 } 222 } 223 224 static inline void record_virtual_memory_commit(void* addr, size_t size, 225 const NativeCallStack& stack) { 226 if (tracking_level() < NMT_summary) return; 227 if 
(addr != NULL) { 228 ThreadCritical tc; 229 if (tracking_level() < NMT_summary) return; 230 VirtualMemoryTracker::add_committed_region((address)addr, size, stack); 231 } 232 } 233 234 static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { 235 if (tracking_level() < NMT_summary) return; 236 if (addr != NULL) { 237 ThreadCritical tc; 238 if (tracking_level() < NMT_summary) return; 239 VirtualMemoryTracker::set_reserved_region_type((address)addr, flag); 240 } 241 } 242 243 static inline void record_thread_stack(void* addr, size_t size) { 244 if (tracking_level() < NMT_summary) return; 245 if (addr != NULL) { 246 // uses thread stack malloc slot for book keeping number of threads 247 MallocMemorySummary::record_malloc(0, mtThreadStack); 248 record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack); 249 } 250 } 251 252 static inline void release_thread_stack(void* addr, size_t size) { 253 if (tracking_level() < NMT_summary) return; 254 if (addr != NULL) { 255 // uses thread stack malloc slot for book keeping number of threads 256 MallocMemorySummary::record_free(0, mtThreadStack); 257 ThreadCritical tc; 258 if (tracking_level() < NMT_summary) return; 259 VirtualMemoryTracker::remove_released_region((address)addr, size); 260 } 261 } 262 263 // Query lock is used to synchronize the access to tracking data. 264 // So far, it is only used by JCmd query, but it may be used by 265 // other tools. 266 static inline Mutex* query_lock() { return _query_lock; } 267 268 // Make a final report or report for hs_err file. 269 static void error_report(outputStream* output) { 270 if (tracking_level() >= NMT_summary) { 271 report(true, output); // just print summary for error case. 
272 } 273 } 274 275 static void final_report(outputStream* output) { 276 NMT_TrackingLevel level = tracking_level(); 277 if (level >= NMT_summary) { 278 report(level == NMT_summary, output); 279 } 280 } 281 282 283 // Stored baseline 284 static inline MemBaseline& get_baseline() { 285 return _baseline; 286 } 287 288 static NMT_TrackingLevel cmdline_tracking_level() { 289 return _cmdline_tracking_level; 290 } 291 292 static void tuning_statistics(outputStream* out); 293 294 private: 295 static NMT_TrackingLevel init_tracking_level(); 296 static void report(bool summary_only, outputStream* output); 297 298 private: 299 // Tracking level 300 static volatile NMT_TrackingLevel _tracking_level; 301 // If NMT option value passed by launcher through environment 302 // variable is valid 303 static bool _is_nmt_env_valid; 304 // command line tracking level 305 static NMT_TrackingLevel _cmdline_tracking_level; 306 // Stored baseline 307 static MemBaseline _baseline; 308 // Query lock 309 static Mutex* _query_lock; 310 }; 311 312 #endif // INCLUDE_NMT 313 314 #endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP 315