/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

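// Human-readable names for each memory type. The order of the entries also
// defines the index used by the per-type counter arrays below, resolved via
// flag2index() and type2name().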
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtTracing,    "Tracing"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // It can happen when type tagging records are lagging
                             // behind
};

MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}

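// release the callsite and virtual memory map arrays (if any), then reset
// all counters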
void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}

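// reset all counters to zero and empty the callsite and virtual memory map
// arrays without releasing them; clear() above also releases the arrays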
void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;
  _number_of_threads = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // We do have anonymous arenas; they are either used as value objects,
      // which are embedded inside other objects, or used as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if an arena memory record is present
        MemPointerRecord* next_malloc_ptr = (MemPointerRecord*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecord*)malloc_itr.next();
  }

  // subtract used arena size to get the size of arena chunks in the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we really don't know how many chunks are in the free list, so just set the
  // counter to 0
  _malloc_data[index].overwrite_counter(0);
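  // For illustration (hypothetical numbers): if 10MB of chunks were malloc'd
  // under mtChunk and arena memory records show 8MB currently in use by arenas,
  // the remaining 2MB is reported under mtChunk as pooled free chunks.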

  return true;
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
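      // committed regions follow the reserved region they belong to in the
      // snapshot, so 'index' from the preceding reserved record still applies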
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsites, but only callsites with at least 1KB
// of outstanding allocations are stored.
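// (K is HotSpot's constant for 1024 bytes, so the amount()/K > 0 checks below
// keep only callsites with at least 1KB outstanding.)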
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // only baseline callsites whose outstanding allocations total at least 1KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx*     reserved_rec = NULL;
  VMMemRegionEx*     committed_rec = NULL;

  // vm_ptr records come in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for virtual memory map.
      // The criteria for consolidation are:
      // 1. two adjacent reserved memory regions
      // 2. belong to the same memory type
      // 3. reserved from the same callsite
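      // For example (hypothetical addresses): a reserved region [0x1000, 0x2000)
      // followed by [0x2000, 0x3000) with the same memory type and callsite pc
      // is folded into a single map entry [0x1000, 0x3000).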
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // after inserting the reserved region, we need the pointer to the
        // element in the virtual memory map array.
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for virtual memory map
      // The criteria are:
      // 1. two adjacent committed memory regions
      // 2. committed from the same callsite
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort it into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only = false, memory usages aggregated by
// callsites are also baselined.
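//
// A minimal usage sketch (the snapshot accessor named below is an assumption;
// it is not defined in this file):
//
//   MemBaseline baseline;
//   MemSnapshot* snapshot = MemTracker::get_snapshot();  // hypothetical accessor
//   if (snapshot != NULL && baseline.baseline(*snapshot, false /* summary_only */)) {
//     // per-type summaries and per-callsite details are now available
//   }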
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  MutexLockerEx snapshot_locker(snapshot._lock, true);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
               baseline_vm_summary(snapshot._vm_ptrs);
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
                 baseline_vm_details(snapshot._vm_ptrs);
  }
  return _baselined;
}


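// map a memory type flag to its index in MemType2NameMap (linear search)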
int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}


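// deep copy from another baseline: totals and per-type counters are copied
// directly; callsite arrays are copied element by element when callsite
// tracking is enabled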
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}
 415 
 416 /* compare functions for sorting */
 417 
 418 // sort snapshot malloc'd records in callsite pc order
 419 int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
 420   assert(MemTracker::track_callsite(),"Just check");
 421   const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
 422   const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
 423   return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
 424 }
 425 
 426 // sort baselined malloc'd records in size order
 427 int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
 428   assert(MemTracker::is_on(), "Just check");
 429   const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
 430   const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
 431   return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
 432 }
 433 
 434 // sort baselined malloc'd records in callsite pc order
 435 int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
 436   assert(MemTracker::is_on(), "Just check");
 437   const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
 438   const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
 439   return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
 440 }
 441 
 442 
 443 // sort baselined mmap'd records in size (reserved size) order
 444 int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
 445   assert(MemTracker::is_on(), "Just check");
 446   const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
 447   const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
 448   return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
 449 }
 450 
 451 // sort baselined mmap'd records in callsite pc order
 452 int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
 453   assert(MemTracker::is_on(), "Just check");
 454   const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
 455   const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
 456   return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
 457 }
 458 
 459 
 460 // sort snapshot malloc'd records in memory block address order
 461 int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
 462   assert(MemTracker::is_on(), "Just check");
 463   const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
 464   const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
 465   int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
 466   assert(delta != 0, "dup pointer");
 467   return delta;
 468 }