/*
 * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/threadCritical.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

/*
 * This counter class tracks memory allocations and deallocations:
 * it records the outstanding allocation count and size, and, in debug
 * builds, their peak values. The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  volatile size_t _count;
  volatile size_t _size;

  DEBUG_ONLY(size_t _peak_count;)
  DEBUG_ONLY(size_t _peak_size;)

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  inline void allocate(size_t sz) {
    Atomic::inc(&_count);
    if (sz > 0) {
      Atomic::add(sz, &_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size);)
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  inline void deallocate(size_t sz) {
    assert(_count > 0, "Nothing allocated yet");
    assert(_size >= sz, "deallocation > allocated");
    Atomic::dec(&_count);
    if (sz > 0) {
      // unary minus operator applied to unsigned type, result still unsigned
      #pragma warning(suppress: 4146)
      Atomic::sub(sz, &_size);
    }
  }

  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add(size_t(sz), &_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size;  })
};
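// Illustrative use (a sketch only; in the tracker this counter is driven by
// the recording classes below rather than called directly):
//   MemoryCounter c;
//   c.allocate(128);   // count = 1, size = 128
//   c.resize(-32);     // the tracked memory shrank: size = 96
//   c.deallocate(96);  // matching free: count = 0, size = 0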

/*
 * Malloc memory used by a particular subsystem.
 * It includes memory acquired through os::malloc()
 * calls and the backing memory of arenas.
 */
class MallocMemory VALUE_OBJ_CLASS_SPEC {
 private:
  MemoryCounter _malloc;
  MemoryCounter _arena;

 public:
  MallocMemory() { }

  inline void record_malloc(size_t sz) {
    _malloc.allocate(sz);
  }

  inline void record_free(size_t sz) {
    _malloc.deallocate(sz);
  }

  inline void record_new_arena() {
    _arena.allocate(0);
  }

  inline void record_arena_free() {
    _arena.deallocate(0);
  }

  inline void record_arena_size_change(long sz) {
    _arena.resize(sz);
  }

  inline size_t malloc_size()  const { return _malloc.size();  }
  inline size_t malloc_count() const { return _malloc.count(); }
  inline size_t arena_size()   const { return _arena.size();   }
  inline size_t arena_count()  const { return _arena.count();  }

  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
};
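// Illustrative use (a sketch; these recorders are normally invoked through
// MallocTracker and the arena code rather than directly):
//   MallocMemory mm;
//   mm.record_malloc(64);            // an os::malloc'd block of 64 bytes
//   mm.record_new_arena();           // a new arena, counted with size 0
//   mm.record_arena_size_change(32); // its backing memory grew by 32 bytes
//   mm.record_free(64);              // the malloc'd block was released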

class MallocMemorySummary;

// A snapshot of malloc'd memory: malloc memory usage by type,
// plus the memory used by the tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  MallocMemory  _malloc[mt_number_of_types];
  MemoryCounter _tracking_header;

 public:
  inline MallocMemory* by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  inline MallocMemory* by_index(int index) {
    assert(index >= 0, "Index out of bounds");
    assert(index < mt_number_of_types, "Index out of bounds");
    return &_malloc[index];
  }

  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  inline size_t thread_count() const {
    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
    return s->by_type(mtThreadStack)->malloc_count();
  }

  void copy_to(MallocMemorySnapshot* s) {
    // Need to make sure that mtChunks don't get deallocated while the
    // copy is going on, because their size is adjusted using this
    // buffer in make_adjustment().
    ThreadCritical tc;
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};
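// Illustrative queries (a sketch; mtThread is one of the MEMFLAGS values):
//   MallocMemorySnapshot* s = MallocMemorySummary::as_snapshot();
//   size_t thread_mallocs = s->by_type(mtThread)->malloc_size();
//   size_t nmt_overhead   = s->malloc_overhead()->size();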

/*
 * This class collects malloc statistics at the summary level.
 */
class MallocMemorySummary : AllStatic {
 private:
  // Reserve memory for placement of the MallocMemorySnapshot object
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];

 public:
  static void initialize();

  static inline void record_malloc(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_malloc(size);
  }

  static inline void record_free(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_free(size);
  }

  static inline void record_new_arena(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_new_arena();
  }

  static inline void record_arena_free(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_free();
  }

  static inline void record_arena_size_change(long size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_size_change(size);
  }

  static void snapshot(MallocMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
    s->make_adjustment();
  }

  // Record memory used by the malloc tracking header
  static inline void record_new_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->allocate(sz);
  }

  static inline void record_free_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->deallocate(sz);
  }

  // The memory used by malloc tracking headers
  static inline size_t tracking_overhead() {
    return as_snapshot()->malloc_overhead()->size();
  }

  static MallocMemorySnapshot* as_snapshot() {
    return (MallocMemorySnapshot*)_snapshot;
  }
};
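// Illustrative snapshotting (a sketch; the destination buffer is owned by
// the caller, e.g. an embedded member of a baseline object):
//   MallocMemorySnapshot copy;
//   MallocMemorySummary::snapshot(&copy);  // consistent copy, then adjustment
//   size_t live_malloc = copy.total();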

/*
 * Malloc tracking header.
 * To satisfy the malloc alignment requirement, NMT uses two machine words
 * for the tracking header, which ensures 8-byte alignment on 32-bit systems
 * and 16-byte alignment on 64-bit systems (product build).
 */
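// Memory layout around a tracked allocation (illustrative):
//
//   | MallocHeader (2 machine words) | user data ...                |
//   ^ malloc_base                    ^ memblock (returned to caller)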

class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
  size_t _size      : 64;
  size_t _flags     : 8;
  size_t _pos_idx   : 16;
  size_t _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
#define MAX_BUCKET_LENGTH         right_n_bits(16)
#else
  size_t _size      : 32;
  size_t _flags     : 8;
  size_t _pos_idx   : 8;
  size_t _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(16)
#define MAX_BUCKET_LENGTH         right_n_bits(8)
#endif // _LP64
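  // The bit fields above pack into exactly two machine words:
  //   LP64:   64 + 8 + 16 + 40 = 128 bits = 2 x 64-bit words
  //   32-bit: 32 + 8 +  8 + 16 =  64 bits = 2 x 32-bit words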

 public:
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
           "Wrong header size");

    if (level == NMT_minimal) {
      return;
    }

    _flags = flags;
    set_size(size);
    if (level == NMT_detail) {
      size_t bucket_idx;
      size_t pos_idx;
      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx, flags)) {
        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
        _bucket_idx = bucket_idx;
        _pos_idx = pos_idx;
      }
    }

    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }

  inline size_t   size()  const { return _size; }
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  bool get_stack(NativeCallStack& stack) const;

  // Cleanup tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    _size = size;
  }
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
                          size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const;
};

// Main class, called from MemTracker, to track malloc activities.
class MallocTracker : AllStatic {
 public:
  // Initialize malloc tracker for a specific tracking level
  static bool initialize(NMT_TrackingLevel level);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

  // Malloc tracking header size for a specific tracking level
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
  }

  // Parameter name convention:
  //   memblock:    the beginning address of the user data
  //   malloc_base: the beginning address of the whole allocation,
  //                including the malloc tracking header
  //
  // The relationship:
  //   memblock = (char*)malloc_base + sizeof(nmt header)
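  //
  // Illustrative round trip (a sketch; assumes a tracking level other than
  // NMT_off, so that a header is present):
  //   char* malloc_base = (char*)::malloc(size + malloc_header_size(level));
  //   void* memblock    = malloc_base + malloc_header_size(level);
  //   assert(get_base(memblock, level) == malloc_base, "must round trip");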

  // Record malloc on the specified memory block
  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
    const NativeCallStack& stack, NMT_TrackingLevel level);

  // Record free on the specified memory block
  static void* record_free(void* memblock);

  // Offset a user memory address to its header address
  static inline void* get_base(void* memblock);
  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
    if (memblock == NULL || level == NMT_off) return memblock;
    return (char*)memblock - malloc_header_size(level);
  }

  // Get memory size
  static inline size_t get_size(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->size();
  }

  // Get memory type
  static inline MEMFLAGS get_flags(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->flags();
  }

  // Get header size
  static inline size_t get_header_size(void* memblock) {
    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
  }

  static inline void record_new_arena(MEMFLAGS flags) {
    MallocMemorySummary::record_new_arena(flags);
  }

  static inline void record_arena_free(MEMFLAGS flags) {
    MallocMemorySummary::record_arena_free(flags);
  }

  static inline void record_arena_size_change(long size, MEMFLAGS flags) {
    MallocMemorySummary::record_arena_size_change(size, flags);
  }

 private:
  static inline MallocHeader* malloc_header(void* memblock) {
    assert(memblock != NULL, "NULL pointer");
    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
    return header;
  }
};
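// Illustrative allocation path (a sketch of how the allocator is expected to
// drive this class; the raw ::malloc/::free calls here stand in for the real
// os::malloc()/os::free() plumbing):
//   void* base     = ::malloc(size + MallocTracker::malloc_header_size(level));
//   void* memblock = MallocTracker::record_malloc(base, size, flags, stack, level);
//   // ... user code uses memblock ...
//   ::free(MallocTracker::record_free(memblock));  // returns the base address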

#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
--- EOF ---