/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"
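
// Mapping from memory type flags to the human-readable names used in NMT reports.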
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtNone,       "Unknown"}  // This can happen when type tagging records lag
                             // behind the allocation records they describe.
};
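
// Construct an empty baseline: tag each per-type counter with its memory type.
// The callsite and virtual memory map arrays are allocated lazily, when detail
// baselining is requested.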
MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}
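
// Free the callsite and virtual memory map arrays, then reset all counters.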
void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}
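
// Reset all summary counters and empty (but do not free) the callsite and
// virtual memory map arrays, so this baseline object can be reused.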
void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;
  // also reset the thread count, which baseline_vm_summary() increments, so
  // that repeated baselines do not accumulate it
  _number_of_threads = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // Anonymous arenas do exist: they are used either as value objects,
      // embedded inside other objects, or as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if the matching arena memory record follows this arena record.
        // peek_next() returns NULL at the end of the array, so guard the dereference.
        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // subtract the used arena size to get the size of arena chunks left on the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we don't know how many chunks are on the free list, so just set the
  // counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
      // committed regions follow the reserved region that contains them, so
      // they are charged to the index computed for that reserved region
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsite, but only callsites with total memory
// allocation over 1KB are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // only aggregate callsites whose total allocation exceeds 1 KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx*     reserved_rec = NULL;
  VMMemRegionEx*     committed_rec = NULL;

  // vm_ptr arrives in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // The criteria for consolidation are:
      // 1. the two reserved memory regions are adjacent
      // 2. they belong to the same memory type
      // 3. they were reserved from the same callsite
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // inserted a reserved region; we need the pointer to the element in the
        // virtual memory map array.
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // The criteria for consolidation are:
      // 1. the two committed memory regions are adjacent
      // 2. they were committed from the same callsite
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort it into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only = false, memory usages aggregated by
// callsites are also baselined.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  MutexLockerEx snapshot_locker(snapshot._lock, true);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
               baseline_vm_summary(snapshot._vm_ptrs);
  _number_of_classes = SystemDictionary::number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs) &&
                 baseline_vm_details(snapshot._vm_ptrs);
  }
  return _baselined;
}
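
// A minimal usage sketch for baseline() above (illustration only: how the
// snapshot is obtained and the accessor named below are assumptions, not part
// of this file; the real reporting path lives elsewhere in the services code):
//
//   MemBaseline baseline;
//   if (baseline.baseline(*snapshot, false /* summary_only: also do details */)) {
//     size_t malloced = baseline.total_malloc_amount();  // assumed accessor
//   }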
int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}
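
// Return the human-readable name registered for the given memory type.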
const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}
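
// Copy summary data from another baseline; when callsite tracking is enabled,
// the per-callsite arrays are copied as well.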
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}
 468