// NOTE(review): This region is a collapsed/garbled paste, not compilable C++.
// It appears to contain TWO variants of the same section of HotSpot's NMT
// header (services/memTracker.hpp), joined by a literal '|' separator, with
// the original file's line numbers (46..259) embedded directly in the text.
//
// What the visible code shows, in both copies:
//  - Before the '#else': the no-op stub MemTracker used when native memory
//    tracking is compiled out — every record_*/init/shutdown member is an
//    empty inline, record_malloc/record_free pass the pointer through
//    unchanged, and malloc_header_size() returns 0.
//  - After the '#else': includes for threadCritical.hpp, mallocTracker.hpp,
//    virtualMemoryTracker.hpp; the CURRENT_PC/CALLER_PC macros, which build a
//    NativeCallStack only when tracking_level() == NMT_detail and
//    NMT_stack_walkable is set; and the real implementations, which bail out
//    when tracking_level() < NMT_summary, then RE-CHECK the level after
//    taking ThreadCritical before calling into VirtualMemoryTracker (the
//    re-check under the lock guards against the level dropping concurrently).
//  - record_thread_stack() books the thread count via
//    MallocMemorySummary::record_malloc(0, mtThreadStack) and then records
//    the stack region with CALLER_PC.
//
// Where the two copies DIVERGE: the first exposes
// get_virtual_memory_uncommit_tracker()/get_virtual_memory_release_tracker()
// (returning Tracker objects, asserting tracking_level() >= NMT_summary),
// while the second replaces them with record_virtual_memory_uncommit()/
// record_virtual_memory_release() helpers that construct the Tracker and call
// tracker.record() inline when tracking_level() > NMT_minimal. These are two
// revisions of the same API — presumably before/after a refactor; TODO
// confirm against version control.
//
// TODO(review): restore this file from version control. The text below is
// left byte-identical on purpose — it is cut mid-definition at both ends
// (the stub class's header and the real class's enclosing declaration are
// outside this view) and must not be hand-edited.
46 static inline void shutdown() { } 47 static inline void init() { } 48 static bool check_launcher_nmt_support(const char* value) { return true; } 49 static bool verify_nmt_option() { return true; } 50 51 static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag, 52 const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; } 53 static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; } 54 static inline size_t malloc_header_size(void* memblock) { return 0; } 55 static inline void* malloc_base(void* memblock) { return memblock; } 56 static inline void* record_free(void* memblock) { return memblock; } 57 58 static inline void record_new_arena(MEMFLAGS flag) { } 59 static inline void record_arena_free(MEMFLAGS flag) { } 60 static inline void record_arena_size_change(int diff, MEMFLAGS flag) { } 61 static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack, 62 MEMFLAGS flag = mtNone) { } 63 static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size, 64 const NativeCallStack& stack, MEMFLAGS flag = mtNone) { } 65 static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { } 66 static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); } 67 static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); } 68 static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { } 69 static inline void record_thread_stack(void* addr, size_t size) { } 70 static inline void release_thread_stack(void* addr, size_t size) { } 71 72 static void final_report(outputStream*) { } 73 static void error_report(outputStream*) { } 74 }; 75 76 #else 77 78 #include "runtime/threadCritical.hpp" 79 #include "services/mallocTracker.hpp" 80 #include "services/virtualMemoryTracker.hpp" 81 82 extern volatile bool NMT_stack_walkable; 83 84 #define CURRENT_PC ((MemTracker::tracking_level() 
== NMT_detail && NMT_stack_walkable) ? \ 85 NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK) 86 #define CALLER_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ 87 NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK) 210 if (tracking_level() < NMT_summary) return; 211 if (addr != NULL) { 212 ThreadCritical tc; 213 if (tracking_level() < NMT_summary) return; 214 VirtualMemoryTracker::add_reserved_region((address)addr, size, 215 stack, flag, true); 216 VirtualMemoryTracker::add_committed_region((address)addr, size, stack); 217 } 218 } 219 220 static inline void record_virtual_memory_commit(void* addr, size_t size, 221 const NativeCallStack& stack) { 222 if (tracking_level() < NMT_summary) return; 223 if (addr != NULL) { 224 ThreadCritical tc; 225 if (tracking_level() < NMT_summary) return; 226 VirtualMemoryTracker::add_committed_region((address)addr, size, stack); 227 } 228 } 229 230 static inline Tracker get_virtual_memory_uncommit_tracker() { 231 assert(tracking_level() >= NMT_summary, "Check by caller"); 232 return Tracker(Tracker::uncommit); 233 } 234 235 static inline Tracker get_virtual_memory_release_tracker() { 236 assert(tracking_level() >= NMT_summary, "Check by caller"); 237 return Tracker(Tracker::release); 238 } 239 240 static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { 241 if (tracking_level() < NMT_summary) return; 242 if (addr != NULL) { 243 ThreadCritical tc; 244 if (tracking_level() < NMT_summary) return; 245 VirtualMemoryTracker::set_reserved_region_type((address)addr, flag); 246 } 247 } 248 249 static inline void record_thread_stack(void* addr, size_t size) { 250 if (tracking_level() < NMT_summary) return; 251 if (addr != NULL) { 252 // uses thread stack malloc slot for book keeping number of threads 253 MallocMemorySummary::record_malloc(0, mtThreadStack); 254 record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack); 255 } 256 } 257 | 46 static inline void 
shutdown() { } 47 static inline void init() { } 48 static bool check_launcher_nmt_support(const char* value) { return true; } 49 static bool verify_nmt_option() { return true; } 50 51 static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag, 52 const NativeCallStack& stack, NMT_TrackingLevel level) { return mem_base; } 53 static inline size_t malloc_header_size(NMT_TrackingLevel level) { return 0; } 54 static inline size_t malloc_header_size(void* memblock) { return 0; } 55 static inline void* malloc_base(void* memblock) { return memblock; } 56 static inline void* record_free(void* memblock) { return memblock; } 57 58 static inline void record_new_arena(MEMFLAGS flag) { } 59 static inline void record_arena_free(MEMFLAGS flag) { } 60 static inline void record_arena_size_change(int diff, MEMFLAGS flag) { } 61 static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack, 62 MEMFLAGS flag = mtNone) { } 63 static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size, 64 const NativeCallStack& stack, MEMFLAGS flag = mtNone) { } 65 static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { } 66 static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { } 67 static inline void record_thread_stack(void* addr, size_t size) { } 68 static inline void release_thread_stack(void* addr, size_t size) { } 69 70 static void final_report(outputStream*) { } 71 static void error_report(outputStream*) { } 72 }; 73 74 #else 75 76 #include "runtime/threadCritical.hpp" 77 #include "services/mallocTracker.hpp" 78 #include "services/virtualMemoryTracker.hpp" 79 80 extern volatile bool NMT_stack_walkable; 81 82 #define CURRENT_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? \ 83 NativeCallStack(0, true) : NativeCallStack::EMPTY_STACK) 84 #define CALLER_PC ((MemTracker::tracking_level() == NMT_detail && NMT_stack_walkable) ? 
\ 85 NativeCallStack(1, true) : NativeCallStack::EMPTY_STACK) 208 if (tracking_level() < NMT_summary) return; 209 if (addr != NULL) { 210 ThreadCritical tc; 211 if (tracking_level() < NMT_summary) return; 212 VirtualMemoryTracker::add_reserved_region((address)addr, size, 213 stack, flag, true); 214 VirtualMemoryTracker::add_committed_region((address)addr, size, stack); 215 } 216 } 217 218 static inline void record_virtual_memory_commit(void* addr, size_t size, 219 const NativeCallStack& stack) { 220 if (tracking_level() < NMT_summary) return; 221 if (addr != NULL) { 222 ThreadCritical tc; 223 if (tracking_level() < NMT_summary) return; 224 VirtualMemoryTracker::add_committed_region((address)addr, size, stack); 225 } 226 } 227 228 static inline void record_virtual_memory_uncommit(void* addr, size_t size) { 229 if (MemTracker::tracking_level() > NMT_minimal) { 230 Tracker tracker(Tracker::uncommit); 231 tracker.record((address)addr, size); 232 } 233 } 234 235 static inline void record_virtual_memory_release(void* addr, size_t size) { 236 if (MemTracker::tracking_level() > NMT_minimal) { 237 Tracker tracker(Tracker::release); 238 tracker.record((address)addr, size); 239 } 240 } 241 242 static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { 243 if (tracking_level() < NMT_summary) return; 244 if (addr != NULL) { 245 ThreadCritical tc; 246 if (tracking_level() < NMT_summary) return; 247 VirtualMemoryTracker::set_reserved_region_type((address)addr, flag); 248 } 249 } 250 251 static inline void record_thread_stack(void* addr, size_t size) { 252 if (tracking_level() < NMT_summary) return; 253 if (addr != NULL) { 254 // uses thread stack malloc slot for book keeping number of threads 255 MallocMemorySummary::record_malloc(0, mtThreadStack); 256 record_virtual_memory_reserve_and_commit(addr, size, CALLER_PC, mtThreadStack); 257 } 258 } 259 |