/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/decoder.hpp"
#include "services/memBaseline.hpp"
#include "services/memPtr.hpp"
#include "services/memPtrArray.hpp"
#include "services/memSnapshot.hpp"
#include "services/memTracker.hpp"

#ifdef ASSERT

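// Debug-only helper: print a memory pointer record (address range, size,
// memory type and the operation it represents), plus the recorded call site
// when call site tracking is enabled.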
void decode_pointer_record(MemPointerRecord* rec) {
  tty->print("Pointer: [" PTR_FORMAT " - " PTR_FORMAT "] size = %d bytes", rec->addr(),
    rec->addr() + rec->size(), (int)rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_vm_pointer()) {
    if (rec->is_allocation_record()) {
      tty->print_cr(" (reserve)");
    } else if (rec->is_commit_record()) {
      tty->print_cr(" (commit)");
    } else if (rec->is_uncommit_record()) {
      tty->print_cr(" (uncommit)");
    } else if (rec->is_deallocation_record()) {
      tty->print_cr(" (release)");
    } else {
      tty->print_cr(" (tag)");
    }
  } else {
    if (rec->is_arena_size_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
    } else {
      tty->print_cr(" (free)");
    }
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((MemPointerRecordEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

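// Debug-only helper: print a virtual memory region record (address range and
// memory type, reserved or committed), plus the recorded call site when call
// site tracking is enabled.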
void decode_vm_region_record(VMMemRegion* rec) {
  tty->print("VM Region [" PTR_FORMAT " - " PTR_FORMAT "]", rec->addr(),
    rec->addr() + rec->size());
  tty->print(" type = %s", MemBaseline::type2name(FLAGS_TO_MEMORY_TYPE(rec->flags())));
  if (rec->is_allocation_record()) {
    tty->print_cr(" (reserved)");
  } else if (rec->is_commit_record()) {
    tty->print_cr(" (committed)");
  } else {
    ShouldNotReachHere();
  }
  if (MemTracker::track_callsite()) {
    char buf[1024];
    address pc = ((VMMemRegionEx*)rec)->pc();
    if (pc != NULL && os::dll_address_to_function_name(pc, buf, sizeof(buf), NULL)) {
      tty->print_cr("\tfrom %s", buf);
    } else {
      tty->print_cr("\tcould not decode pc = " PTR_FORMAT, pc);
    }
  }
}

#endif

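// Copy the incoming record into a VMMemRegion (a VMMemRegionEx when call site
// tracking is enabled) and insert it into the region array at the iterator's
// current location.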
bool VMMemPointerIterator::insert_record(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert(&new_rec);
}

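// Same as insert_record(), but insert the new region after the iterator's
// current location.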
bool VMMemPointerIterator::insert_record_after(MemPointerRecord* rec) {
  VMMemRegionEx new_rec;
  assert(rec->is_allocation_record() || rec->is_commit_record(),
    "Sanity check");
  if (MemTracker::track_callsite()) {
    new_rec.init((MemPointerRecordEx*)rec);
  } else {
    new_rec.init(rec);
  }
  return insert_after(&new_rec);
}

// We don't consolidate reserved regions, since they may be tagged with
// different memory types.
bool VMMemPointerIterator::add_reserved_region(MemPointerRecord* rec) {
  assert(rec->is_allocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();

  // we don't have anything yet
  if (cur == NULL) {
    return insert_record(rec);
  }

  assert(cur->is_reserved_region(), "Sanity check");
  // duplicated records
  if (cur->is_same_region(rec)) {
    return true;
  }
  assert(cur->base() > rec->addr(), "locate() should have positioned the iterator here");
  assert(!cur->overlaps_region(rec), "overlapping reserved regions");
  return insert_record(rec);
}

// we do consolidate committed regions
bool VMMemPointerIterator::add_committed_region(MemPointerRecord* rec) {
  assert(rec->is_commit_record(), "Sanity check");
  VMMemRegion* reserved_rgn = (VMMemRegion*)current();
  assert(reserved_rgn->is_reserved_region() && reserved_rgn->contains_region(rec),
    "Sanity check");

  // a thread's native stack is always marked as "committed", so ignore
  // the "commit" operation that creates its stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(reserved_rgn->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  // if the reserved region has any committed regions
  VMMemRegion* committed_rgn = (VMMemRegion*)next();
  while (committed_rgn != NULL && committed_rgn->is_committed_region()) {
    // duplicated commit records
    if (committed_rgn->contains_region(rec)) {
      return true;
    } else if (committed_rgn->overlaps_region(rec)) {
      if (rec->addr() < committed_rgn->addr()) {
        // overlaps the front part
        committed_rgn->expand_region(rec->addr(),
          committed_rgn->addr() - rec->addr());
      } else {
        // overlaps the tail part
        address committed_rgn_end = committed_rgn->addr() +
              committed_rgn->size();
        assert(committed_rgn_end < rec->addr() + rec->size(),
             "overlap tail part");
        committed_rgn->expand_region(committed_rgn_end,
          (rec->addr() + rec->size()) - committed_rgn_end);
      }
    } else if (committed_rgn->base() + committed_rgn->size() == rec->addr()) {
      // the regions are adjacent, merge them
      committed_rgn->expand_region(rec->addr(), rec->size());
      VMMemRegion* next_reg = (VMMemRegion*)next();
      // see if we can consolidate the next committed region as well
      if (next_reg != NULL && next_reg->is_committed_region() &&
        next_reg->base() == committed_rgn->base() + committed_rgn->size()) {
          committed_rgn->expand_region(next_reg->base(), next_reg->size());
          // delete the merged region
          remove();
      }
      return true;
    } else if (committed_rgn->base() > rec->addr()) {
      // found the location, insert this committed region
      return insert_record(rec);
    }
    committed_rgn = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

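// Remove (part of) a committed region in response to an 'uncommit' record.
// A full match removes the committed region, an uncommit at either end
// shrinks it, and an uncommit in the middle splits it in two.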
bool VMMemPointerIterator::remove_uncommitted_region(MemPointerRecord* rec) {
  assert(rec->is_uncommit_record(), "sanity check");
  VMMemRegion* cur;
  cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
  // a thread's native stack is always marked as "committed", so ignore
  // the "uncommit" operation that releases its stack guard pages
  if (FLAGS_TO_MEMORY_TYPE(cur->flags()) == mtThreadStack &&
      FLAGS_TO_MEMORY_TYPE(rec->flags()) != mtThreadStack) {
    return true;
  }

  cur = (VMMemRegion*)next();
  while (cur != NULL && cur->is_committed_region()) {
    if (cur->addr() >= rec->addr() + rec->size()) {
      // we passed the uncommitted range without finding a committed region,
      // so the region is already uncommitted; must be a duplicated record
      break;
    } else if (cur->contains_region(rec)) {
      if (cur->is_same_region(rec)) {
        // uncommit the whole region
        remove();
        break;
      } else if (rec->addr() == cur->addr() ||
        rec->addr() + rec->size() == cur->addr() + cur->size()) {
        // uncommitted from either end of the current memory region
        cur->exclude_region(rec->addr(), rec->size());
        break;
      } else { // split the committed region and release the middle
        address high_addr = cur->addr() + cur->size();
        size_t sz = high_addr - rec->addr();
        cur->exclude_region(rec->addr(), sz);
        sz = high_addr - (rec->addr() + rec->size());
        if (MemTracker::track_callsite()) {
          MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
             ((VMMemRegionEx*)cur)->pc());
          return insert_record_after(&tmp);
        } else {
          MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
          return insert_record_after(&tmp);
        }
      }
    }
    cur = (VMMemRegion*)next();
  }

  // we may not find the committed record due to duplicated records
  return true;
}

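// Remove (part of) the current reserved region in response to a 'release'
// record. A full match removes the reservation, a release at either end
// shrinks it, and a release in the middle splits the reservation in two.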
bool VMMemPointerIterator::remove_released_region(MemPointerRecord* rec) {
  assert(rec->is_deallocation_record(), "Sanity check");
  VMMemRegion* cur = (VMMemRegion*)current();
  assert(cur->is_reserved_region() && cur->contains_region(rec),
    "Sanity check");
#ifdef ASSERT
  VMMemRegion* next_reg = (VMMemRegion*)peek_next();
  // should not have any committed memory in this reserved region
  assert(next_reg == NULL || !next_reg->is_committed_region(), "Sanity check");
#endif
  if (rec->is_same_region(cur)) {
    remove();
  } else if (rec->addr() == cur->addr() ||
    rec->addr() + rec->size() == cur->addr() + cur->size()) {
    // released region is at either end of this region
    cur->exclude_region(rec->addr(), rec->size());
  } else { // split the reserved region and release the middle
    address high_addr = cur->addr() + cur->size();
    size_t sz = high_addr - rec->addr();
    cur->exclude_region(rec->addr(), sz);
    sz = high_addr - rec->addr() - rec->size();
    if (MemTracker::track_callsite()) {
      MemPointerRecordEx tmp(rec->addr() + rec->size(), cur->flags(), sz,
        ((VMMemRegionEx*)cur)->pc());
      return insert_reserved_region(&tmp);
    } else {
      MemPointerRecord tmp(rec->addr() + rec->size(), cur->flags(), sz);
      return insert_reserved_region(&tmp);
    }
  }
  return true;
}

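// Insert a new reserved region at the proper, address-sorted position,
// skipping over the committed regions that belong to the preceding
// reserved region.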
bool VMMemPointerIterator::insert_reserved_region(MemPointerRecord* rec) {
  // skip all 'commit' records associated with the previous reserved region
  VMMemRegion* p = (VMMemRegion*)next();
  while (p != NULL && p->is_committed_region() &&
         p->base() + p->size() < rec->addr()) {
    p = (VMMemRegion*)next();
  }
  return insert_record(rec);
}

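// Split an existing reserved region around [new_rgn_addr, new_rgn_addr +
// new_rgn_size): the split yields two regions when the new range touches
// either end of the original region, and three when it lies in the middle.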
bool VMMemPointerIterator::split_reserved_region(VMMemRegion* rgn, address new_rgn_addr, size_t new_rgn_size) {
  assert(rgn->contains_region(new_rgn_addr, new_rgn_size), "Not fully contained");
  address pc = (MemTracker::track_callsite() ? ((VMMemRegionEx*)rgn)->pc() : NULL);
  if (rgn->base() == new_rgn_addr) { // new region is at the beginning of the original region
    size_t sz = rgn->size() - new_rgn_size;
    // the original region becomes the 'new' region
    rgn->exclude_region(new_rgn_addr + new_rgn_size, sz);
    // the remainder becomes the next region
    MemPointerRecordEx next_rgn(new_rgn_addr + new_rgn_size, rgn->flags(), sz, pc);
    return insert_reserved_region(&next_rgn);
  } else if (rgn->base() + rgn->size() == new_rgn_addr + new_rgn_size) {
    // new region is at the end of the original region
    rgn->exclude_region(new_rgn_addr, new_rgn_size);
    MemPointerRecordEx next_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    return insert_reserved_region(&next_rgn);
  } else {
    // the original region will be split into three
    address rgn_high_addr = rgn->base() + rgn->size();
    // first region
    rgn->exclude_region(new_rgn_addr, (rgn_high_addr - new_rgn_addr));
    // the second region is the new region
    MemPointerRecordEx new_rgn(new_rgn_addr, rgn->flags(), new_rgn_size, pc);
    if (!insert_reserved_region(&new_rgn)) return false;
    // the remaining region
    MemPointerRecordEx rem_rgn(new_rgn_addr + new_rgn_size, rgn->flags(),
      rgn_high_addr - (new_rgn_addr + new_rgn_size), pc);
    return insert_reserved_region(&rem_rgn);
  }
}

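// qsort-style comparator: order records by their sequence number, i.e. by
// the order in which the operations were recorded.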
static int sort_in_seq_order(const void* p1, const void* p2) {
  assert(p1 != NULL && p2 != NULL, "Sanity check");
  const MemPointerRecord* mp1 = (MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (MemPointerRecord*)p2;
  return (mp1->seq() - mp2->seq());
}

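// Allocate the malloc and virtual memory staging arrays (the Ex variants
// when call site tracking is enabled); on any allocation failure, clean up
// and return false.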
bool StagingArea::init() {
  if (MemTracker::track_callsite()) {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecordEx>();
  } else {
    _malloc_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
    _vm_data = new (std::nothrow) MemPointerArrayImpl<SeqMemPointerRecord>();
  }

  if (_malloc_data != NULL && _vm_data != NULL &&
      !_malloc_data->out_of_memory() &&
      !_vm_data->out_of_memory()) {
    return true;
  } else {
    if (_malloc_data != NULL) delete _malloc_data;
    if (_vm_data != NULL) delete _vm_data;
    _malloc_data = NULL;
    _vm_data = NULL;
    return false;
  }
}

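// Return an iterator over the staged virtual memory records, sorted into
// sequence number order so the operations replay in the order they were
// recorded.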
VMRecordIterator StagingArea::virtual_memory_record_walker() {
  MemPointerArray* arr = vm_data();
  // sort into seq number order
  arr->sort((FN_SORT)sort_in_seq_order);
  return VMRecordIterator(arr);
}

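// Allocate the malloc and virtual memory snapshot arrays (the Ex variants
// when call site tracking is enabled), initialize the staging area and
// create the lock that guards the snapshot.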
MemSnapshot::MemSnapshot() {
  if (MemTracker::track_callsite()) {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecordEx>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(64, true);
  } else {
    _alloc_ptrs = new (std::nothrow) MemPointerArrayImpl<MemPointerRecord>();
    _vm_ptrs = new (std::nothrow) MemPointerArrayImpl<VMMemRegion>(64, true);
  }

  _staging_area.init();
  _lock = new (std::nothrow) Mutex(Monitor::max_nonleaf - 1, "memSnapshotLock");
  NOT_PRODUCT(_untracked_count = 0;)
}

MemSnapshot::~MemSnapshot() {
  assert(MemTracker::shutdown_in_progress(), "native memory tracking still on");
  {
    MutexLockerEx locker(_lock);
    if (_alloc_ptrs != NULL) {
      delete _alloc_ptrs;
      _alloc_ptrs = NULL;
    }

    if (_vm_ptrs != NULL) {
      delete _vm_ptrs;
      _vm_ptrs = NULL;
    }
  }

  if (_lock != NULL) {
    delete _lock;
    _lock = NULL;
  }
}

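// Copy a sequenced record over another record for the same address,
// preserving the sequence number; the concrete record type depends on
// whether call site tracking is enabled.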
void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");
  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");

  if (MemTracker::track_callsite()) {
    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
  } else {
    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
  }
}

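// Copy a sequenced staging record into a snapshot record, dropping the
// sequence number in the process.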
void MemSnapshot::assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(src != NULL && dest != NULL, "Just check");
  assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");

  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
  }
}

// merge a recorder into the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");

  SequencedRecordIterator itr(rec->pointer_itr());

  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
  MemPointerRecord* matched_rec;

  while (incoming_rec != NULL) {
    if (incoming_rec->is_vm_pointer()) {
      // virtual memory records are not consolidated during merge; they are
      // appended here and processed at promotion time
      if (!_staging_area.vm_data()->append(incoming_rec)) {
        return false;
      }
    } else {
      // locate the matched record and/or position the iterator at the proper
      // location for this incoming record
      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
      if (matched_rec == NULL) {
        // we have not seen this memory block in this generation,
        // so just add it to the staging area
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else if (incoming_rec->addr() == matched_rec->addr()) {
        // whoever has the higher sequence number wins
        if (incoming_rec->seq() > matched_rec->seq()) {
          copy_seq_pointer(matched_rec, incoming_rec);
        }
      } else if (incoming_rec->addr() < matched_rec->addr()) {
        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
        ShouldNotReachHere();
      }
    }
    incoming_rec = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(check_staging_data();)
  return true;
}

// promote data to the next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
  assert(_staging_area.malloc_data() != NULL && _staging_area.vm_data() != NULL,
         "Just check");
  MutexLockerEx lock(_lock, true);

  MallocRecordIterator malloc_itr = _staging_area.malloc_record_walker();
  bool promoted = false;
  if (promote_malloc_records(&malloc_itr)) {
    VMRecordIterator vm_itr = _staging_area.virtual_memory_record_walker();
    if (promote_virtual_memory_records(&vm_itr)) {
      promoted = true;
    }
  }

  NOT_PRODUCT(check_malloc_pointers();)
  _staging_area.clear();
  return promoted;
}

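// Merge the staged malloc records into the malloc snapshot: allocation and
// arena size records update or insert entries, while a deallocation record
// removes the matching entry (and the trailing arena size record of an
// arena, if present).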
bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
  MemPointerIterator malloc_snapshot_itr(_alloc_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  MemPointerRecord* matched_rec;
  while (new_rec != NULL) {
    matched_rec = (MemPointerRecord*)malloc_snapshot_itr.locate(new_rec->addr());
    // found a matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot can only contain 'live' records
      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
             "Sanity check");
      // update block state
      if (new_rec->is_allocation_record()) {
        assign_pointer(matched_rec, new_rec);
      } else if (new_rec->is_arena_size_record()) {
        if (new_rec->size() == 0) {
          // remove the size record once the size drops to 0
          malloc_snapshot_itr.remove();
        } else {
          assign_pointer(matched_rec, new_rec);
        }
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
          if (next != NULL && next->is_arena_size_record() &&
              next->is_size_record_of_arena(matched_rec)) {
            malloc_snapshot_itr.remove();
          }
        }
        // the memory is deallocated, remove the related record(s)
        malloc_snapshot_itr.remove();
      }
    } else {
      // don't insert a size 0 record
      if (new_rec->is_arena_size_record() && new_rec->size() == 0) {
        new_rec = NULL;
      }

      if (new_rec != NULL) {
        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
            }
          } else {
            if (!malloc_snapshot_itr.insert(new_rec)) {
              return false;
            }
          }
        }
#ifndef PRODUCT
        else if (!has_allocation_record(new_rec->addr())) {
          // NMT cannot track memory allocated during startup, before NMT is enabled
          _untracked_count++;
        }
#endif
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

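// Replay the staged virtual memory records against the region snapshot:
// reserve, commit, uncommit, release and type tagging operations each
// update the list of reserved and committed regions.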
bool MemSnapshot::promote_virtual_memory_records(MemPointerArrayIterator* itr) {
  VMMemPointerIterator vm_snapshot_itr(_vm_ptrs);
  MemPointerRecord* new_rec = (MemPointerRecord*)itr->current();
  VMMemRegion* reserved_rec;
  while (new_rec != NULL) {
    assert(new_rec->is_vm_pointer(), "Sanity check");

    // locate the reserved region that contains the specified address, or
    // the nearest reserved region with a base address just above the
    // specified address
    reserved_rec = (VMMemRegion*)vm_snapshot_itr.locate(new_rec->addr());
    if (reserved_rec != NULL && reserved_rec->contains_region(new_rec)) {
      // snapshot can only have 'live' records
      assert(reserved_rec->is_reserved_region(), "Sanity check");
      if (new_rec->is_allocation_record()) {
        if (!reserved_rec->is_same_region(new_rec)) {
          // we only deal with splitting a bigger reserved region into
          // smaller regions; so far, CDS is the only use case
          if (!vm_snapshot_itr.split_reserved_region(reserved_rec, new_rec->addr(), new_rec->size())) {
            return false;
          }
        }
      } else if (new_rec->is_uncommit_record()) {
        if (!vm_snapshot_itr.remove_uncommitted_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_commit_record()) {
        // insert a new committed region, or expand an existing one to
        // cover this newly committed region
        if (!vm_snapshot_itr.add_committed_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_deallocation_record()) {
        // release part or all of the memory region
        if (!vm_snapshot_itr.remove_released_region(new_rec)) {
          return false;
        }
      } else if (new_rec->is_type_tagging_record()) {
        // tag this reserved virtual memory range with a memory type; a memory
        // range can not be re-tagged to a different type
        assert(FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == mtNone ||
               FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) == FLAGS_TO_MEMORY_TYPE(new_rec->flags()),
               "Sanity check");
        reserved_rec->tag(new_rec->flags());
      } else {
        ShouldNotReachHere();
      }
    } else {
      /*
       * An assertion failure here indicates mismatched virtual memory records.
       * The likely scenario is that some virtual memory operations did not go
       * through the os::xxxx_memory() API and therefore have to be tracked
       * manually (perfMemory is an example).
       */
      assert(new_rec->is_allocation_record(), "Sanity check");
      if (!vm_snapshot_itr.add_reserved_region(new_rec)) {
        return false;
      }
    }
    new_rec = (MemPointerRecord*)itr->next();
  }
  return true;
}

#ifndef PRODUCT
void MemSnapshot::print_snapshot_stats(outputStream* st) {
  st->print_cr("Snapshot:");
  st->print_cr("\tMalloced: %d/%d [%5.2f%%] %dKB", _alloc_ptrs->length(), _alloc_ptrs->capacity(),
    (100.0 * (float)_alloc_ptrs->length()) / (float)_alloc_ptrs->capacity(), _alloc_ptrs->instance_size()/K);

  st->print_cr("\tVM: %d/%d [%5.2f%%] %dKB", _vm_ptrs->length(), _vm_ptrs->capacity(),
    (100.0 * (float)_vm_ptrs->length()) / (float)_vm_ptrs->capacity(), _vm_ptrs->instance_size()/K);

  st->print_cr("\tMalloc staging area: %d/%d [%5.2f%%] %dKB", _staging_area.malloc_data()->length(),
    _staging_area.malloc_data()->capacity(),
    (100.0 * (float)_staging_area.malloc_data()->length()) / (float)_staging_area.malloc_data()->capacity(),
    _staging_area.malloc_data()->instance_size()/K);

  st->print_cr("\tVirtual memory staging area: %d/%d [%5.2f%%] %dKB", _staging_area.vm_data()->length(),
    _staging_area.vm_data()->capacity(),
    (100.0 * (float)_staging_area.vm_data()->length()) / (float)_staging_area.vm_data()->capacity(),
    _staging_area.vm_data()->instance_size()/K);

  st->print_cr("\tUntracked allocations: %d", _untracked_count);
}

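// Debug check: malloc records in the snapshot must remain sorted by address.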
void MemSnapshot::check_malloc_pointers() {
  MemPointerArrayIteratorImpl mItr(_alloc_ptrs);
  MemPointerRecord* p = (MemPointerRecord*)mItr.current();
  MemPointerRecord* prev = NULL;
  while (p != NULL) {
    if (prev != NULL) {
      assert(p->addr() >= prev->addr(), "sorting order");
    }
    prev = p;
    p = (MemPointerRecord*)mItr.next();
  }
}

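// Debug helper: report whether the malloc staging area contains an
// allocation record for the given address.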
bool MemSnapshot::has_allocation_record(address addr) {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  while (cur != NULL) {
    if (cur->addr() == addr && cur->is_allocation_record()) {
      return true;
    }
    cur = (MemPointerRecord*)itr.next();
  }
  return false;
}
#endif // PRODUCT

#ifdef ASSERT
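// Debug check: malloc staging records must be sorted by address (and by
// record tag for records at the same address), and the virtual memory
// staging array must contain only virtual memory records.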
void MemSnapshot::check_staging_data() {
  MemPointerArrayIteratorImpl itr(_staging_area.malloc_data());
  MemPointerRecord* cur = (MemPointerRecord*)itr.current();
  MemPointerRecord* next = (MemPointerRecord*)itr.next();
  while (next != NULL) {
    assert((next->addr() > cur->addr()) ||
      ((next->flags() & MemPointerRecord::tag_masks) >
       (cur->flags() & MemPointerRecord::tag_masks)),
       "sorting order");
    cur = next;
    next = (MemPointerRecord*)itr.next();
  }

  MemPointerArrayIteratorImpl vm_itr(_staging_area.vm_data());
  cur = (MemPointerRecord*)vm_itr.current();
  while (cur != NULL) {
    assert(cur->is_vm_pointer(), "virtual memory pointer only");
    cur = (MemPointerRecord*)vm_itr.next();
  }
}

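// Debug dump of the virtual memory snapshot; committed regions are printed
// indented under their enclosing reserved region.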
void MemSnapshot::dump_all_vm_pointers() {
  MemPointerArrayIteratorImpl itr(_vm_ptrs);
  VMMemRegion* ptr = (VMMemRegion*)itr.current();
  tty->print_cr("dump virtual memory pointers:");
  while (ptr != NULL) {
    if (ptr->is_committed_region()) {
      tty->print("\t");
    }
    tty->print("[" PTR_FORMAT " - " PTR_FORMAT "] [%x]", ptr->addr(),
      (ptr->addr() + ptr->size()), ptr->flags());

    if (MemTracker::track_callsite()) {
      VMMemRegionEx* ex = (VMMemRegionEx*)ptr;
      if (ex->pc() != NULL) {
        char buf[1024];
        if (os::dll_address_to_function_name(ex->pc(), buf, sizeof(buf), NULL)) {
          tty->print_cr("\t%s", buf);
        } else {
          tty->cr();
        }
      }
    }

    ptr = (VMMemRegion*)itr.next();
  }
  tty->flush();
}
#endif // ASSERT