/*
 * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_TRACKER_HPP
#define SHARE_VM_SERVICES_MEM_TRACKER_HPP

#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"


// Public interface of Native Memory Tracking (NMT). When NMT is compiled out
// (!INCLUDE_NMT), Tracker and MemTracker collapse into empty inline stubs so
// that call sites do not need their own conditional compilation.
#if !INCLUDE_NMT

#define CURRENT_PC NativeCallStack::EMPTY_STACK
#define CALLER_PC  NativeCallStack::EMPTY_STACK

// No-op stand-in for the real Tracker defined in the #else branch below.
class Tracker : public StackObj {
 public:
  Tracker() { }
  void record(address addr, size_t size) { }
};

// No-op stand-in for the real MemTracker defined in the #else branch below.
// Tracking level is permanently NMT_off and every record_* call is empty.
class MemTracker : AllStatic {
 public:
  static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
  static inline void shutdown() { }
  static inline void init() { }
  static bool check_launcher_nmt_support(const char* value) { return true; }
  static bool verify_nmt_option() { return true; }

  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; }
  static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; }
  static inline size_t malloc_header_size(void* memblock) { return 0; }
  static inline void* malloc_base(void* memblock) { return memblock; }
  static inline void* record_free(void* memblock) { return memblock; }

  static inline void record_new_arena(MEMFLAGS flag) { }
  static inline void record_arena_free(MEMFLAGS flag) { }
  static inline void record_arena_size_change(int diff, MEMFLAGS flag) { }
  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
  static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
  static inline void record_thread_stack(void* addr, size_t size) { }
  static inline void release_thread_stack(void* addr, size_t size) { }

  static void final_report(outputStream*) { }
  static void error_report(outputStream*) { }
};

#else

#include "runtime/threadCritical.hpp"
#include "services/mallocTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

extern volatile bool NMT_stack_walkable;

// Capture a native call stack at the current site (CURRENT_PC, skips 0 extra
// frames) or at the caller's site (CALLER_PC, skips 1 extra frame) — but only
// when tracking is in detail mode and stack walking has been declared safe;
// otherwise expand to the shared empty stack.
#define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
                    NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK)
#define CALLER_PC  ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \
                    NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK)

class MemBaseline;
class Mutex;

// Tracker guards the 'release' semantics of a virtual memory operation: it
// holds the ThreadCritical lock across the actual uncommit/release and the
// recording of it, so another thread cannot obtain and record the same
// region that was just released before the release itself is recorded.
class Tracker : public StackObj {
 public:
  enum TrackerType {
     uncommit,
     release
  };

 public:
  Tracker(enum TrackerType type) : _type(type) { }
  void record(address addr, size_t size);
 private:
  enum TrackerType  _type;
  // Virtual memory tracking data structures are protected by the
  // ThreadCritical lock; holding this member keeps it locked for the
  // lifetime of the Tracker (StackObj scope).
  ThreadCritical    _tc;
};

class MemTracker : AllStatic {
 public:
  // Current tracking level, lazily initialized on first use.
  static inline NMT_TrackingLevel tracking_level() {
    if (_tracking_level == NMT_unknown) {
      // No fencing is needed here, since the JVM is still in
      // single-threaded mode at this point.
      _tracking_level = init_tracking_level();
      _cmdline_tracking_level = _tracking_level;
    }
    return _tracking_level;
  }

  // Late initialization, for work that can not be done in
  // init_tracking_level(), which must NOT malloc any memory.
  static void init();

  // Shutdown native memory tracking
  static void shutdown();

  // Verify the native memory tracking command line option.
  // This check allows the JVM to detect whether a compatible launcher
  // is used.
  // If an incompatible launcher is used, NMT may not be able to start,
  // even if it is enabled by a command line option.
  // A warning message should be given if this is encountered.
  static bool check_launcher_nmt_support(const char* value);

  // This method checks the native memory tracking environment
  // variable value passed by the launcher.
  // The launcher is only obligated to pass the native memory tracking
  // option value, but not obligated to validate the value, and the
  // launcher has the option to discard the native memory tracking
  // option from the command line once it sets up the environment
  // variable, so NMT has to catch a bad value here.
  static bool verify_nmt_option();

  // Transition the tracking level to the specified level
  static bool transition_to(NMT_TrackingLevel level);

  // Record a malloc; returns the address to hand to the caller (past the
  // tracking header when one is in use — see MallocTracker).
  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
    const NativeCallStack& stack, NMT_TrackingLevel level) {
    return MallocTracker::record_malloc(mem_base, size, flag, stack, level);
  }

  // Size of the malloc tracking header for the given tracking level.
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return MallocTracker::malloc_header_size(level);
  }

  // Size of the tracking header preceding an existing malloc'd block
  // (0 when tracking is off).
  static size_t malloc_header_size(void* memblock) {
    if (tracking_level() != NMT_off) {
      return MallocTracker::get_header_size(memblock);
    }
    return 0;
  }

  // Returns the malloc base address, which is the starting address
  // of the malloc tracking header if tracking is enabled.
  // Otherwise, it returns the same address.
  static void* malloc_base(void* memblock);

  // Record a malloc free and return the malloc base address
  static inline void* record_free(void* memblock) {
    return MallocTracker::record_free(memblock);
  }


  // Record creation of an arena
  static inline void record_new_arena(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_new_arena(flag);
  }

  // Record destruction of an arena
  static inline void record_arena_free(MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_free(flag);
  }

  // Record an arena size change. Arena size is the size of all arena
  // chunks that back the arena.
  static inline void record_arena_size_change(int diff, MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    MallocTracker::record_arena_size_change(diff, flag);
  }

  // Record a virtual memory reservation. The tracking level is rechecked
  // under ThreadCritical to avoid racing with an NMT shutdown that could
  // be tearing down the tracking data structures.
  static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      // Recheck to avoid potential racing during NMT shutdown
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
    }
  }

  // Record a combined reserve + commit of the same region (same
  // recheck-under-lock pattern as above).
  static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_reserved_region((address)addr, size,
        stack, flag, true);
      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
    }
  }

  // Record a commit within an already-reserved region.
  static inline void record_virtual_memory_commit(void* addr, size_t size,
    const NativeCallStack& stack) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
    }
  }

  // Record an uncommit via a Tracker, which holds ThreadCritical for the
  // duration of the operation (see Tracker comment above).
  static inline void record_virtual_memory_uncommit(void* addr, size_t size) {
    if (MemTracker::tracking_level() > NMT_minimal) {
      Tracker tracker(Tracker::uncommit);
      tracker.record((address)addr, size);
    }
  }

  // Record a release via a Tracker (see Tracker comment above).
  static inline void record_virtual_memory_release(void* addr, size_t size) {
    if (MemTracker::tracking_level() > NMT_minimal) {
      Tracker tracker(Tracker::release);
      tracker.record((address)addr, size);
    }
  }

  // Tag a reserved region with a memory type.
  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
    }
  }

  // Record the creation of a thread stack as reserved+committed memory.
  static inline void record_thread_stack(void* addr, size_t size) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      // uses the thread stack malloc slot for bookkeeping the number of threads
      MallocMemorySummary::record_malloc(0, mtThreadStack);
      record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack);
    }
  }

  // Record the release of a thread stack.
  static inline void release_thread_stack(void* addr, size_t size) {
    if (tracking_level() < NMT_summary) return;
    if (addr != NULL) {
      // uses the thread stack malloc slot for bookkeeping the number of threads
      MallocMemorySummary::record_free(0, mtThreadStack);
      ThreadCritical tc;
      if (tracking_level() < NMT_summary) return;
      VirtualMemoryTracker::remove_released_region((address)addr, size);
    }
  }

  // The query lock is used to synchronize access to tracking data.
  // So far, it is only used by JCmd query, but it may be used by
  // other tools.
  static inline Mutex* query_lock() { return _query_lock; }

  // Make a report for the hs_err file.
  static void error_report(outputStream* output) {
    if (tracking_level() >= NMT_summary) {
      report(true, output);  // just print summary for error case.
    }
  }

  // Make a final report at VM exit (summary or detail depending on level).
  static void final_report(outputStream* output) {
    NMT_TrackingLevel level = tracking_level();
    if (level >= NMT_summary) {
      report(level == NMT_summary, output);
    }
  }


  // Stored baseline
  static inline MemBaseline& get_baseline() {
    return _baseline;
  }

  // Tracking level as requested on the command line (tracking_level() may
  // have since been transitioned to a different level).
  static NMT_TrackingLevel cmdline_tracking_level() {
    return _cmdline_tracking_level;
  }

  static void tuning_statistics(outputStream* out);

 private:
  static NMT_TrackingLevel init_tracking_level();
  static void report(bool summary_only, outputStream* output);

 private:
  // Tracking level
  static volatile NMT_TrackingLevel _tracking_level;
  // Whether the NMT option value passed by the launcher through the
  // environment variable is valid
  static bool _is_nmt_env_valid;
  // command line tracking level
  static NMT_TrackingLevel _cmdline_tracking_level;
  // Stored baseline
  static MemBaseline _baseline;
  // Query lock
  static Mutex* _query_lock;
};

#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_MEM_TRACKER_HPP