rev 4499 : 8013120: NMT: Kitchensink crashes with assert(next_region == NULL || !next_region->is_committed_region()) failed: Sanity check
Summary: Fixed NMT to deal with releasing virtual memory region when there are still committed regions within it
Reviewed-by: acorn, coleenp
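
The failing assertion was in VMMemPointerIterator::remove_released_region, which assumed a reserved region could no longer contain committed regions by the time it was released. The fix drops that assumption: when a whole reservation is released, the reservation record and every committed record recorded after it (all of which fall inside the reservation, since records are sorted by base address) are removed as well. Below is a minimal sketch of that logic; Region and release_reservation are illustrative stand-ins, not the actual HotSpot NMT classes.

#include <cassert>
#include <cstddef>
#include <vector>

// Simplified stand-in for snapshot records: a reservation record is
// followed by the committed sub-regions it contains, all sorted by
// base address.
struct Region {
  char*  base;
  size_t size;
  bool   committed;                             // false => reservation record
  char*  end() const { return base + size; }
};

// Release a whole reservation at index i: remove the reservation
// record and every committed record that follows it and falls inside
// [base, base + size), instead of asserting that none exist.
static void release_reservation(std::vector<Region>& regions, size_t i) {
  assert(!regions[i].committed && "must point at a reservation record");
  char* low  = regions[i].base;
  char* high = regions[i].end();
  regions.erase(regions.begin() + i);           // the reservation itself
  while (i < regions.size() && regions[i].committed) {
    assert(regions[i].base >= low && regions[i].end() <= high);
    regions.erase(regions.begin() + i);         // a committed sub-region
  }
  (void)low; (void)high;                        // only read by the asserts
}

The patch below does the same walk in place via the iterator's remove(), and keeps low_addr/high_addr under #ifdef ASSERT since they are only used for the range check.
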
--- old/src/share/vm/services/memSnapshot.cpp
+++ new/src/share/vm/services/memSnapshot.cpp
1 1 /*
2 2 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
3 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 4 *
5 5 * This code is free software; you can redistribute it and/or modify it
6 6 * under the terms of the GNU General Public License version 2 only, as
7 7 * published by the Free Software Foundation.
8 8 *
9 9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 12 * version 2 for more details (a copy is included in the LICENSE file that
13 13 * accompanied this code).
14 14 *
15 15 * You should have received a copy of the GNU General Public License version
16 16 * 2 along with this work; if not, write to the Free Software Foundation,
17 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 18 *
19 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 20 * or visit www.oracle.com if you need additional information or have any
21 21 * questions.
22 22 *
23 23 */
24 24
25 25 #include "precompiled.hpp"
26 26 #include "runtime/mutexLocker.hpp"
27 27 #include "utilities/decoder.hpp"
28 28 #include "services/memBaseline.hpp"
29 29 #include "services/memPtr.hpp"
30 30 #include "services/memPtrArray.hpp"
31 31 #include "services/memSnapshot.hpp"
32 32 #include "services/memTracker.hpp"
33 33
34 34 #ifdef ASSERT
35 35
36 36 void decode_pointer_record(MemPointerRecord* rec) {
37 37 tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
38 38 rec->addr() + rec->size(), (int)rec->size());
39 39 tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
40 40 if (rec->is_vm_pointer()) {
41 41 if (rec->is_allocation_record()) {
42 42 tty->print_cr(" (reserve)");
43 43 } else if (rec->is_commit_record()) {
44 44 tty->print_cr(" (commit)");
45 45 } else if (rec->is_uncommit_record()) {
46 46 tty->print_cr(" (uncommit)");
47 47 } else if (rec->is_deallocation_record()) {
48 48 tty->print_cr(" (release)");
49 49 } else {
50 50 tty->print_cr(" (tag)");
51 51 }
52 52 } else {
53 53 if (rec->is_arena_memory_record()) {
54 54 tty->print_cr(" (arena size)");
55 55 } else if (rec->is_allocation_record()) {
56 56 tty->print_cr(" (malloc)");
57 57 } else {
58 58 tty->print_cr(" (free)");
59 59 }
60 60 }
61 61 if (MemTracker::track_callsite()) {
62 62 char buf[1024];
63 63 address pc = ((MemPointerRecordEx*)rec)->pc();
64 64 if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
65 65 tty->print_cr("\tfrom %s", buf);
66 66 } else {
67 67 tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
68 68 }
69 69 }
70 70 }
71 71
72 72 void decode_vm_region_record(VMMemRegion* rec) {
73 73 tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
74 74 rec->addr() + rec->size());
75 75 tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
76 76 if (rec->is_allocation_record()) {
77 77 tty->print_cr(" (reserved)");
78 78 } else if (rec->is_commit_record()) {
79 79 tty->print_cr(" (committed)");
80 80 } else {
81 81 ShouldNotReachHere();
82 82 }
83 83 if (MemTracker::track_callsite()) {
84 84 char buf[1024];
85 85 address pc = ((VMMemRegionEx*)rec)->pc();
86 86 if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
87 87 tty->print_cr("\tfrom %s", buf);
88 88 } else {
89 89 tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
90 90 }
91 91
92 92 }
93 93 }
94 94
95 95 #endif
96 96
97 97
98 98 bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
99 99 VMMemRegionEx new_rec;
100 100 assert(rec->is_allocation_record() || rec->is_commit_record(),
101 101 "Sanity check");
102 102 if (MemTracker::track_callsite()) {
103 103 new_rec.init((MemPointerRecordEx*)rec);
104 104 } else {
105 105 new_rec.init(rec);
106 106 }
107 107 return insert(&new_rec);
108 108 }
109 109
110 110 bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
111 111 VMMemRegionEx new_rec;
112 112 assert(rec->is_allocation_record() || rec->is_commit_record(),
113 113 "Sanity check");
114 114 if (MemTracker::track_callsite()) {
115 115 new_rec.init((MemPointerRecordEx*)rec);
116 116 } else {
117 117 new_rec.init(rec);
118 118 }
119 119 return insert_after(&new_rec);
120 120 }
121 121
122 122 // we don't consolidate reserved regions, since they may be categorized
123 123 // as different memory types.
124 124 bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
125 125 assert(rec->is_allocation_record(), "Sanity check");
126 126 VMMemRegion* reserved_region = (VMMemRegion*)current();
127 127
128 128 // we don't have anything yet
129 129 if (reserved_region == NULL) {
130 130 return insert_record(rec);
131 131 }
132 132
133 133 assert(reserved_region->is_reserved_region(), "Sanity check");
134 134 // duplicated records
135 135 if (reserved_region->is_same_region(rec)) {
136 136 return true;
137 137 }
138 138 // Overlapping stack regions indicate that a JNI thread failed to
139 139 // detach from the VM before exiting. This leaks the JavaThread object.
140 140 if (CheckJNICalls) {
141 141 guarantee(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) != mtThreadStack ||
142 142 !reserved_region->overlaps_region(rec),
143 143 "Attached JNI thread exited without being detached");
144 144 }
145 145 // otherwise, we should not have overlapping reserved regions
146 146 assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
147 147 reserved_region->base() > rec->addr(), "Just check: locate()");
148 148 assert(FLAGS_TO_MEMORY_TYPE(reserved_region->flags()) == mtThreadStack ||
149 149 !reserved_region->overlaps_region(rec), "overlapping reserved regions");
150 150
151 151 return insert_record(rec);
152 152 }
153 153
154 154 // we do consolidate committed regions
155 155 bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
156 156 assert(rec->is_commit_record(), "Sanity check");
157 157 VMMemRegion* reserved_rgn = (VMMemRegion*)current();
158 158 assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
159 159 "Sanity check");
160 160
161 161 // thread's native stack is always marked as "committed", ignore
162 162 // the "commit" operation for creating stack guard pages
163 163 if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
164 164 FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
165 165 return true;
166 166 }
167 167
168 168 // walk the committed regions within this reserved region, if any
169 169 VMMemRegion* committed_rgn = (VMMemRegion*)next();
170 170 while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
171 171 // duplicated commit records
172 172 if (committed_rgn->contains_region(rec)) {
173 173 return true;
174 174 } else if (committed_rgn->overlaps_region(rec)) {
175 175 // overlaps front part
176 176 if (rec->addr() < committed_rgn->addr()) {
177 177 committed_rgn->expand_region(rec->addr(),
178 178 committed_rgn->addr() - rec->addr());
179 179 } else {
180 180 // overlaps tail part
181 181 address committed_rgn_end = committed_rgn->addr() +
182 182 committed_rgn->size();
183 183 assert(committed_rgn_end < rec->addr() + rec->size(),
184 184 "overlap tail part");
185 185 committed_rgn->expand_region(committed_rgn_end,
186 186 (rec->addr() + rec->size()) - committed_rgn_end);
187 187 }
188 188 } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
189 189 // adjacent to each other
190 190 committed_rgn->expand_region(rec->addr(), rec->size());
191 191 VMMemRegion* next_reg = (VMMemRegion*)next();
192 192 // see if we can consolidate next committed region
193 193 if (next_reg != NULL && next_reg->is_committed_region() &&
194 194 next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
195 195 committed_rgn->expand_region(next_reg->base(), next_reg->size());
196 196 // delete merged region
197 197 remove();
198 198 }
199 199 return true;
200 200 } else if (committed_rgn->base() > rec->addr()) {
201 201 // found the location, insert this committed region
202 202 return insert_record(rec);
203 203 }
204 204 committed_rgn = (VMMemRegion*)next();
205 205 }
206 206 return insert_record(rec);
207 207 }
208 208
209 209 bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
210 210 assert(rec->is_uncommit_record(), "sanity check");
211 211 VMMemRegion* cur;
212 212 cur = (VMMemRegion*)current();
213 213 assert(cur->is_reserved_region() && cur->contains_region(rec),
214 214 "Sanity check");
215 215 // thread's native stack is always marked as "committed", ignore
216 216 // the "uncommit" operation for stack guard pages
217 217 if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
218 218 FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
219 219 return true;
220 220 }
221 221
222 222 cur = (VMMemRegion*)next();
223 223 while (cur != NULL && cur->is_committed_region()) {
224 224 // region already uncommitted, must be due to duplicated record
225 225 if (cur->addr() >= rec->addr() + rec->size()) {
226 226 break;
227 227 } else if (cur->contains_region(rec)) {
228 228 // uncommit whole region
229 229 if (cur->is_same_region(rec)) {
230 230 remove();
231 231 break;
232 232 } else if (rec->addr() == cur->addr() ||
233 233 rec->addr() + rec->size() == cur->addr() + cur->size()) {
234 234 // uncommitted from either end of current memory region.
235 235 cur->exclude_region(rec->addr(), rec->size());
236 236 break;
237 237 } else { // split the committed region and release the middle
238 238 address high_addr = cur->addr() + cur->size();
239 239 size_t sz = high_addr - rec->addr();
240 240 cur->exclude_region(rec->addr(), sz);
241 241 sz = high_addr - (rec->addr() + rec->size());
242 242 if (MemTracker::track_callsite()) {
243 243 MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
244 244 ((VMMemRegionEx*)cur)->pc());
245 245 return insert_record_after(&tmp);
246 246 } else {
247 247 MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
248 248 return insert_record_after(&tmp);
249 249 }
250 250 }
251 251 }
252 252 cur = (VMMemRegion*)next();
253 253 }
254 254
255 255 // we may not find committed record due to duplicated records
256 256 return true;
257 257 }
258 258
259 259 bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
260 260 assert(rec->is_deallocation_record(), "Sanity check");
261 261 VMMemRegion* cur = (VMMemRegion*)current();
262 262 assert(cur->is_reserved_region() && cur->contains_region(rec),
263 263 "Sanity check");
264 264 if (rec->is_same_region(cur)) {
265 - // release whole reserved region
265 +
266 + // In the snapshot, virtual memory records are sorted in the following order:
267 + // 1. by the virtual memory region's base address;
268 + // 2. a reservation record comes first, followed by the commit records within
269 + //    that reservation, which are also in base address order.
270 + // When a reserved region is released, we remove the reservation record and all
271 + // commit records that follow it.
266 272 #ifdef ASSERT
267 - VMMemRegion* next_region = (VMMemRegion*)peek_next();
268 - // should not have any committed memory in this reserved region
269 - assert(next_region == NULL || !next_region->is_committed_region(), "Sanity check");
273 + address low_addr = cur->addr();
274 + address high_addr = low_addr + cur->size();
270 275 #endif
276 + // remove virtual memory reservation record
271 277 remove();
278 + // remove committed regions within the reservation just removed
279 + VMMemRegion* next_region = (VMMemRegion*)current();
280 + while (next_region != NULL && next_region->is_committed_region()) {
281 + assert(next_region->addr() >= low_addr &&
282 + next_region->addr() + next_region->size() <= high_addr,
283 + "Range check");
284 + remove();
285 + next_region = (VMMemRegion*)current();
286 + }
272 287 } else if (rec->addr() == cur->addr() ||
273 288 rec->addr() + rec->size() == cur->addr() + cur->size()) {
274 289 // released region is at either end of this region
275 290 cur->exclude_region(rec->addr(), rec->size());
276 291 assert(check_reserved_region(), "Integrity check");
277 292 } else { // split the reserved region and release the middle
278 293 address high_addr = cur->addr() + cur->size();
279 294 size_t sz = high_addr - rec->addr();
280 295 cur->exclude_region(rec->addr(), sz);
281 296 sz = high_addr - rec->addr() - rec->size();
282 297 if (MemTracker::track_callsite()) {
283 298 MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
284 299 ((VMMemRegionEx*)cur)->pc());
285 300 bool ret = insert_reserved_region(&tmp);
286 301 assert(!ret || check_reserved_region(), "Integrity check");
287 302 return ret;
288 303 } else {
289 304 MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
290 305 bool ret = insert_reserved_region(&tmp);
291 306 assert(!ret || check_reserved_region(), "Integrity check");
292 307 return ret;
293 308 }
294 309 }
295 310 return true;
296 311 }
297 312
298 313 bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
299 314 // skip all 'commit' records associated with previous reserved region
300 315 VMMemRegion* p = (VMMemRegion*)next();
301 316 while (p != NULL && p->is_committed_region() &&
302 317 p->base() + p->size() < rec->addr()) {
303 318 p = (VMMemRegion*)next();
304 319 }
305 320 return insert_record(rec);
306 321 }
307 322
308 323 bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
309 324 assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
310 325 address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
311 326 if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
312 327 size_t sz = rgn->size() - new_rgn_size;
313 328 // the original region becomes 'new' region
314 329 rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
315 330 // remaining becomes next region
316 331 MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
317 332 return insert_reserved_region(&next_rgn);
318 333 } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
319 334 rgn->exclude_region(new_rgn_addr, new_rgn_size);
320 335 MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
321 336 return insert_reserved_region(&next_rgn);
322 337 } else {
323 338 // the original region will be split into three
324 339 address rgn_high_addr = rgn->base() + rgn->size();
325 340 // first region
326 341 rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
327 342 // the second region is the new region
328 343 MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
329 344 if (!insert_reserved_region(&new_rgn)) return false;
330 345 // the remaining region
331 346 MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
332 347 rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
333 348 return insert_reserved_region(&rem_rgn);
334 349 }
335 350 }
336 351
337 352 static int sort_in_seq_order(const void* p1, const void* p2) {
338 353 assert(p1 != NULL && p2 != NULL, "Sanity check");
339 354 const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
340 355 const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
341 356 return (mp1->seq() - mp2->seq());
342 357 }
343 358
344 359 bool StagingArea::init() {
345 360 if (MemTracker::track_callsite()) {
346 361 _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
347 362 _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecordEx>();
348 363 } else {
349 364 _malloc_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
350 365 _vm_data = new (std::nothrow)MemPointerArrayImpl<SeqMemPointerRecord>();
351 366 }
352 367
353 368 if (_malloc_data != NULL && _vm_data != NULL &&
354 369 !_malloc_data->out_of_memory() &&
355 370 !_vm_data->out_of_memory()) {
356 371 return true;
357 372 } else {
358 373 if (_malloc_data != NULL) delete _malloc_data;
359 374 if (_vm_data != NULL) delete _vm_data;
360 375 _malloc_data = NULL;
361 376 _vm_data = NULL;
362 377 return false;
363 378 }
364 379 }
365 380
366 381
367 382 VMRecordIterator StagingArea::virtual_memory_record_walker() {
368 383 MemPointerArray* arr = vm_data();
369 384 // sort into seq number order
370 385 arr->sort((FN_SORT)sort_in_seq_order);
371 386 return VMRecordIterator(arr);
372 387 }
373 388
374 389
375 390 MemSnapshot::MemSnapshot() {
376 391 if (MemTracker::track_callsite()) {
377 392 _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
378 393 _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegionEx>(64, true);
379 394 } else {
380 395 _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
381 396 _vm_ptrs = new (std::nothrow)MemPointerArrayImpl<VMMemRegion>(64, true);
382 397 }
383 398
384 399 _staging_area.init();
385 400 _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
386 401 NOT_PRODUCT(_untracked_count = 0;)
387 402 _number_of_classes = 0;
388 403 }
389 404
390 405 MemSnapshot::~MemSnapshot() {
391 406 assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
392 407 {
393 408 MutexLockerEx locker(_lock);
394 409 if (_alloc_ptrs != NULL) {
395 410 delete _alloc_ptrs;
396 411 _alloc_ptrs = NULL;
397 412 }
398 413
399 414 if (_vm_ptrs != NULL) {
400 415 delete _vm_ptrs;
401 416 _vm_ptrs = NULL;
402 417 }
403 418 }
404 419
405 420 if (_lock != NULL) {
406 421 delete _lock;
407 422 _lock = NULL;
408 423 }
409 424 }
410 425
411 426
412 427 void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
413 428 assert(dest != NULL && src != NULL, "Just check");
414 429 assert(dest->addr() == src->addr(), "Just check");
415 430 assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
416 431
417 432 if (MemTracker::track_callsite()) {
418 433 *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
419 434 } else {
420 435 *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
421 436 }
422 437 }
423 438
424 439 void MemSnapshot::assign_pointer(MemPointerRecord*dest, const MemPointerRecord* src) {
425 440 assert(src != NULL && dest != NULL, "Just check");
426 441 assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");
427 442
428 443 if (MemTracker::track_callsite()) {
429 444 *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
430 445 } else {
431 446 *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
432 447 }
433 448 }
434 449
435 450 // merge a recorder into the staging area
436 451 bool MemSnapshot::merge(MemRecorder* rec) {
437 452 assert(rec != NULL && !rec->out_of_memory(), "Just check");
438 453
439 454 SequencedRecordIterator itr(rec->pointer_itr());
440 455
441 456 MutexLockerEx lock(_lock, true);
442 457 MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
443 458 MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
444 459 MemPointerRecord* matched_rec;
445 460
446 461 while (incoming_rec != NULL) {
447 462 if (incoming_rec->is_vm_pointer()) {
448 463 // we don't do anything with virtual memory records during merge
449 464 if (!_staging_area.vm_data()->append(incoming_rec)) {
450 465 return false;
451 466 }
452 467 } else {
453 468 // locate matched record and/or also position the iterator to proper
454 469 // location for this incoming record.
455 470 matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
456 471 // we have not seen this memory block in this generation,
457 472 // so just add to staging area
458 473 if (matched_rec == NULL) {
459 474 if (!malloc_staging_itr.insert(incoming_rec)) {
460 475 return false;
461 476 }
462 477 } else if (incoming_rec->addr() == matched_rec->addr()) {
463 478 // whoever has higher sequence number wins
464 479 if (incoming_rec->seq() > matched_rec->seq()) {
465 480 copy_seq_pointer(matched_rec, incoming_rec);
466 481 }
467 482 } else if (incoming_rec->addr() < matched_rec->addr()) {
468 483 if (!malloc_staging_itr.insert(incoming_rec)) {
469 484 return false;
470 485 }
471 486 } else {
472 487 ShouldNotReachHere();
473 488 }
474 489 }
475 490 incoming_rec = (MemPointerRecord*)itr.next();
476 491 }
477 492 NOT_PRODUCT(check_staging_data();)
478 493 return true;
479 494 }
480 495
481 496
482 497 // promote data to next generation
483 498 bool MemSnapshot::promote(int number_of_classes) {
484 499 assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
485 500 assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
486 501 "Just check");
487 502 MutexLockerEx lock(_lock, true);
488 503
489 504 MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
490 505 bool promoted = false;
491 506 if (promote_malloc_records(&malloc_itr)) {
492 507 VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
493 508 if (promote_virtual_memory_records(&vm_itr)) {
494 509 promoted = true;
495 510 }
496 511 }
497 512
498 513 NOT_PRODUCT(check_malloc_pointers();)
499 514 _staging_area.clear();
500 515 _number_of_classes = number_of_classes;
501 516 return promoted;
502 517 }
503 518
504 519 bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
505 520 MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
506 521 MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
507 522 MemPointerRecord* matched_rec;
508 523 while (new_rec != NULL) {
509 524 matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
510 525 // found matched memory block
511 526 if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
512 527 // snapshot already contains 'live' records
513 528 assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
514 529 "Sanity check");
515 530 // update block states
516 531 if (new_rec->is_allocation_record()) {
517 532 assign_pointer(matched_rec, new_rec);
518 533 } else if (new_rec->is_arena_memory_record()) {
519 534 if (new_rec->size() == 0) {
520 535 // remove size record once size drops to 0
521 536 malloc_snapshot_itr.remove();
522 537 } else {
523 538 assign_pointer(matched_rec, new_rec);
524 539 }
525 540 } else {
526 541 // a deallocation record
527 542 assert(new_rec->is_deallocation_record(), "Sanity check");
528 543 // an arena record can be followed by a size record; we need to remove both
529 544 if (matched_rec->is_arena_record()) {
530 545 MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
531 546 if (next != NULL && next->is_arena_memory_record() &&
532 547 next->is_memory_record_of_arena(matched_rec)) {
533 548 malloc_snapshot_itr.remove();
534 549 }
535 550 }
536 551 // the memory is deallocated, remove related record(s)
537 552 malloc_snapshot_itr.remove();
538 553 }
539 554 } else {
540 555 // don't insert size 0 record
541 556 if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
542 557 new_rec = NULL;
543 558 }
544 559
545 560 if (new_rec != NULL) {
546 561 if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
547 562 if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
548 563 if (!malloc_snapshot_itr.insert_after(new_rec)) {
549 564 return false;
550 565 }
551 566 } else {
552 567 if (!malloc_snapshot_itr.insert(new_rec)) {
553 568 return false;
554 569 }
555 570 }
556 571 }
557 572 #ifndef PRODUCT
558 573 else if (!has_allocation_record(new_rec->addr())) {
559 574 // NMT cannot track some startup memory, which is allocated before NMT is on
560 575 _untracked_count ++;
561 576 }
562 577 #endif
563 578 }
564 579 }
565 580 new_rec = (MemPointerRecord*)itr->next();
566 581 }
567 582 return true;
568 583 }
569 584
570 585 bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
571 586 VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
572 587 MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
573 588 VMMemRegion* reserved_rec;
574 589 while (new_rec != NULL) {
575 590 assert(new_rec->is_vm_pointer(), "Sanity check");
576 591
577 592 // locate a reserved region that contains the specified address, or
578 593 // the nearest reserved region whose base address is just above the
579 594 // specified address
580 595 reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
581 596 if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
582 597 // snapshot can only have 'live' records
583 598 assert(reserved_rec->is_reserved_region(), "Sanity check");
584 599 if (new_rec->is_allocation_record()) {
585 600 if (!reserved_rec->is_same_region(new_rec)) {
586 601 // only deal with splitting a bigger reserved region into smaller regions.
587 602 // So far, CDS is the only use case.
588 603 if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
589 604 return false;
590 605 }
591 606 }
592 607 } else if (new_rec->is_uncommit_record()) {
593 608 if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
594 609 return false;
595 610 }
596 611 } else if (new_rec->is_commit_record()) {
597 612 // insert or expand existing committed region to cover this
598 613 // newly committed region
599 614 if (!vm_snapshot_itr.add_committed_region(new_rec)) {
600 615 return false;
601 616 }
602 617 } else if (new_rec->is_deallocation_record()) {
603 618 // release part or all memory region
604 619 if (!vm_snapshot_itr.remove_released_region(new_rec)) {
605 620 return false;
606 621 }
607 622 } else if (new_rec->is_type_tagging_record()) {
608 623 // tag this reserved virtual memory range with a memory type. A memory range cannot
609 624 // be re-tagged to a different type.
610 625 assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
611 626 FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
612 627 "Sanity check");
613 628 reserved_rec->tag(new_rec->flags());
614 629 } else {
615 630 ShouldNotReachHere();
616 631 }
617 632 } else {
618 633 /*
619 634 * The assertion failure indicates mismatched virtual memory records. The likely
620 635 * scenario is that some virtual memory operations did not go through the os::xxxx_memory()
621 636 * api and therefore have to be tracked manually (perfMemory is an example).
622 637 */
623 638 assert(new_rec->is_allocation_record(), "Sanity check");
624 639 if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
625 640 return false;
626 641 }
627 642 }
628 643 new_rec = (MemPointerRecord*)itr->next();
629 644 }
630 645 return true;
631 646 }
632 647
633 648 #ifndef PRODUCT
634 649 void MemSnapshot::print_snapshot_stats(outputStream* st) {
635 650 st->print_cr("Snapshot:");
636 651 st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
637 652 (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);
638 653
639 654 st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
640 655 (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);
641 656
642 657 st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
643 658 _staging_area.malloc_data()->capacity(),
644 659 (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
645 660 _staging_area.malloc_data()->instance_size()/K);
646 661
647 662 st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
648 663 _staging_area.vm_data()->capacity(),
649 664 (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
650 665 _staging_area.vm_data()->instance_size()/K);
651 666
652 667 st->print_cr("\tUntracked allocation: %d", _untracked_count);
653 668 }
654 669
655 670 void MemSnapshot::check_malloc_pointers() {
656 671 MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
657 672 MemPointerRecord* p = (MemPointerRecord*)mItr.current();
658 673 MemPointerRecord* prev = NULL;
659 674 while (p != NULL) {
660 675 if (prev != NULL) {
661 676 assert(p->addr() >= prev->addr(), "sorting order");
662 677 }
663 678 prev = p;
664 679 p = (MemPointerRecord*)mItr.next();
665 680 }
666 681 }
667 682
668 683 bool MemSnapshot::has_allocation_record(address addr) {
669 684 MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
670 685 MemPointerRecord* cur = (MemPointerRecord*)itr.current();
671 686 while (cur != NULL) {
672 687 if (cur->addr() == addr && cur->is_allocation_record()) {
673 688 return true;
674 689 }
675 690 cur = (MemPointerRecord*)itr.next();
676 691 }
677 692 return false;
678 693 }
679 694 #endif // PRODUCT
680 695
681 696 #ifdef ASSERT
682 697 void MemSnapshot::check_staging_data() {
683 698 MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
684 699 MemPointerRecord* cur = (MemPointerRecord*)itr.current();
685 700 MemPointerRecord* next = (MemPointerRecord*)itr.next();
686 701 while (next != NULL) {
687 702 assert((next->addr() > cur->addr()) ||
688 703 ((next->flags() & MemPointerRecord::tag_masks) >
689 704 (cur->flags() & MemPointerRecord::tag_masks)),
690 705 "sorting order");
691 706 cur = next;
692 707 next = (MemPointerRecord*)itr.next();
693 708 }
694 709
695 710 MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
696 711 cur = (MemPointerRecord*)vm_itr.current();
697 712 while (cur != NULL) {
698 713 assert(cur->is_vm_pointer(), "virtual memory pointer only");
699 714 cur = (MemPointerRecord*)vm_itr.next();
700 715 }
701 716 }
702 717
703 718 void MemSnapshot::dump_all_vm_pointers() {
704 719 MemPointerArrayIteratorImpl itr(_vm_ptrs);
705 720 VMMemRegion* ptr = (VMMemRegion*)itr.current();
706 721 tty->print_cr("dump virtual memory pointers:");
707 722 while (ptr != NULL) {
708 723 if (ptr->is_committed_region()) {
709 724 tty->print("\t");
710 725 }
711 726 tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
712 727 (ptr->addr() + ptr->size()), ptr->flags());
713 728
714 729 if (MemTracker::track_callsite()) {
715 730 VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
716 731 if (ex->pc() != NULL) {
717 732 char buf[1024];
718 733 if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
719 734 tty->print_cr("\t%s", buf);
720 735 } else {
721 736 tty->print_cr("");
722 737 }
723 738 }
724 739 }
725 740
726 741 ptr = (VMMemRegion*)itr.next();
727 742 }
728 743 tty->flush();
729 744 }
730 745 #endif // ASSERT
731 746
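
For context on the ordering invariant that the new comment block in remove_released_region describes: commit records are kept consolidated under their reservation, so a commit adjacent to an existing committed record extends that record rather than adding a new one. A sketch of the consolidation rule from add_committed_region follows, again with the same illustrative stand-in types as in the sketch before the diff, not the HotSpot API; the front-adjacency and partial-overlap cases of the real code are elided.

#include <cstddef>
#include <vector>

struct Region {                                 // same simplified stand-in as above
  char*  base;
  size_t size;
  bool   committed;
  char*  end() const { return base + size; }
};

// Record a commit inside a reservation whose committed sub-regions
// start at index i. A commit adjacent to an existing committed record
// extends it, and may then merge it with the record after it;
// otherwise the commit is inserted in base-address order.
static void add_committed(std::vector<Region>& regions, size_t i,
                          char* addr, size_t size) {
  while (i < regions.size() && regions[i].committed) {
    Region& c = regions[i];
    if (c.base <= addr && addr + size <= c.end()) {
      return;                                   // duplicate commit record
    } else if (c.end() == addr) {
      c.size += size;                           // extend at the tail
      if (i + 1 < regions.size() && regions[i + 1].committed &&
          regions[i + 1].base == c.end()) {     // now touches the next record
        c.size += regions[i + 1].size;          // merge the two records
        regions.erase(regions.begin() + i + 1);
      }
      return;
    } else if (c.base > addr) {
      break;                                    // insert before this record
    }
    ++i;
  }
  regions.insert(regions.begin() + i, Region{addr, size, true});
}

This consolidation is what makes the removal loop in the fix sufficient: every committed record belonging to a reservation sits directly after it, so the walk can stop at the first record that is not a committed region.
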