/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

#ifdef ASSERT

void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_size_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }
  }
}

void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT "", pc);
    }
  }
}

#endif

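// Wrap an incoming tracker record in a VMMemRegion (or VMMemRegionEx when
// call sites are tracked) and insert it at, or immediately after, the
// iterator's current position.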
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// we don't consolidate reserved regions, since they may be categorized
// as different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();

  // we don't have anything yet
  if (reserved_rgn == NULL) {
    return insert_record(rec);
  }

  assert(reserved_rgn->is_reserved_region(), "Sanity check");
  // duplicated records
  if (reserved_rgn->is_same_region(rec)) {
    return true;
  }
  // an attached JNI thread can exit without detaching itself, which causes
  // the JVM to leak the JavaThread object (JDK-8001743)
  if (CheckJNICalls) {
    guarantee(FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) != mtThreadStack ||
              !reserved_rgn->overlaps_region(rec),
              "Attached JNI thread exited without being detached");
  }
  // otherwise, we should not have overlapping reserved regions
  assert(FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack ||
    reserved_rgn->base() > rec->addr(), "Just check: locate()");
  assert(FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack ||
    !reserved_rgn->overlaps_region(rec), "overlapping reserved regions");

  return insert_record(rec);
}

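// Add a committed region that falls within the reserved region at the
// iterator's current position.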
// we do consolidate committed regions
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // thread's native stack is always marked as "committed", ignore
  // the "commit" operation for creating stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  // if the reserved region has any committed regions
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      // overlaps front part
      if (rec->addr() < committed_rgn->addr()) {
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps tail part
        address committed_rgn_end = committed_rgn->addr() +
          committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
          "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the two regions are adjacent, merge them
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate next committed region
      if (next_reg != NULL && next_reg->is_committed_region() &&
          next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
        committed_rgn->expand_region(next_reg->base(), next_reg->size());
        // delete merged region
        remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location, insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // thread's native stack is always marked as "committed", ignore
  // the "uncommit" operation on stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    // region already uncommitted, must be due to duplicated record
    if (cur->addr() >= rec->addr() + rec->size()) {
      break;
    } else if (cur->contains_region(rec)) {
      // uncommit whole region
      if (cur->is_same_region(rec)) {
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of current memory region.
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
            ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find committed record due to duplicated records
  return true;
}

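// Remove a released region from the reserved region at the iterator's
// current position. The release may cover the whole reserved region, either
// end of it, or a range in the middle (which splits the reserved region).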
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
#ifdef ASSERT
  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
  // should not have any committed memory in this reserved region
  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
#endif
  if (rec->is_same_region(cur)) {
    remove();
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      return insert_reserved_region(&tmp);
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      return insert_reserved_region(&tmp);
    }
  }
  return true;
}

bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
         p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // remaining becomes next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

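// qsort-style comparator used to bring staged virtual memory records back
// into the order in which they were generated (sequence number order).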
static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}

VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}

MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

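// Copy one pointer record over another; both records must describe the same
// address. When call sites are tracked, the call site pc is copied as well.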
void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");

  MEMFLAGS flags = dest->flags();

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *dest = *src;
  }
}

// merge a per-thread memory recorder into the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord *p1, *p2;
  p1 = (MemPointerRecord*) itr.current();
  while (p1 != NULL) {
    if (p1->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
      if (!_staging_area.vm_data()->append(p1)) {
        return false;
      }
    } else {
      // locate the matching record and/or position the iterator to the proper
      // location for this incoming record.
      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
      // we have not seen this memory block, so just add to staging area
      if (p2 == NULL) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else if (p1->addr() == p2->addr()) {
        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
        // a memory block can have many tagging records; find the right one to
        // replace or the right position to insert
        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
              (p1->flags() & MemPointerRecord::tag_masks)) {
            p2 = (MemPointerRecord*)malloc_staging_itr.next();
            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
          } else {
            break;
          }
        }
        int df = (p1->flags() & MemPointerRecord::tag_masks) -
          (p2->flags() & MemPointerRecord::tag_masks);
        if (df == 0) {
          assert(p1->seq() > 0, "not sequenced");
          assert(p2->seq() > 0, "not sequenced");
          if (p1->seq() > p2->seq()) {
            copy_pointer(p2, p1);
          }
        } else if (df < 0) {
          if (!malloc_staging_itr.insert(p1)) {
            return false;
          }
        } else {
          if (!malloc_staging_itr.insert_after(p1)) {
            return false;
          }
        }
      } else if (p1->addr() < p2->addr()) {
        if (!malloc_staging_itr.insert(p1)) {
          return false;
        }
      } else {
        if (!malloc_staging_itr.insert_after(p1)) {
          return false;
        }
      }
    }
    p1 = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(check_staging_data();)
  return true;
}

// promote data to next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}

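// Promote staged malloc records into the malloc snapshot: allocation and
// arena size records update or are inserted as live records; deallocation
// records remove the matching record (and an arena's trailing size record).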
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
             "Sanity check");
      // update block states
      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
        copy_pointer(matched_rec, new_rec);
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record, we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next->is_arena_size_record()) {
            // it has to match the arena record
            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // it is a new record, insert into snapshot
      if (new_rec->is_arena_size_record()) {
        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
          // no matched arena record, ignore the size record
          new_rec = NULL;
        }
      }
      // only 'live' records can go into the snapshot
      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track some startup memory, which is allocated before NMT is on
          _untracked_count ++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

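// Promote staged virtual memory records into the virtual memory snapshot,
// which only keeps 'live' reserved regions and their committed sub-regions.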
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate a reserved region that contains the specified address, or
    // the nearest reserved region whose base address is just above the
    // specified address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // we only deal with splitting a bigger reserved region into smaller
          // regions. So far, CDS is the only use case.
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert or expand existing committed region to cover this
        // newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type. A memory
        // range cannot be re-tagged to a different type.
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records.
       * The likely scenario is that some virtual memory operations do not go
       * through the os::xxxx_memory() API and have to be tracked manually
       * (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

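// The remainder of this file is debug-only: snapshot usage statistics,
// sanity checks on record ordering, and a dump of tracked virtual memory
// regions.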
#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging Area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocation: %d", _untracked_count);
}

void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
      "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->print_cr("");
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT