/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"


MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // This can happen when type tagging records lag
                             // behind the allocation records they describe
};

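// Construct an empty baseline. Per-type counters are tagged with their
// memory type up front; the detail (callsite) arrays are allocated lazily
// by the detail baseline passes.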
MemBaseline::MemBaseline() {
  _baselined = false;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}

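// Release the lazily allocated callsite and virtual memory map arrays,
// then reset all counters.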
void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}

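// Reset all counters so a new baseline can be taken; arrays that are
// already allocated are emptied and reused rather than freed.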
void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;
  // reset the thread count too; baseline_vm_summary() recounts it
  _number_of_threads = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
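  // Records arrive sorted by memory address. An arena's memory-size record,
  // when present, immediately follows its owning arena record, which is what
  // allows the peek_next() pairing below.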
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // We do have anonymous arenas; they are used either as value objects
      // embedded inside other objects, or as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if the matching arena memory record is present
        MemPointerRecord* next_malloc_ptr = (MemPointerRecord*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
                 "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecord*)malloc_itr.next();
  }

  // subtract the used arena size to get the size of arena chunks on the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we really don't know how many chunks are on the free list, so just set the
  // counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// Check if there is a safepoint in progress; if so, block the thread
// until the safepoint completes.
void MemBaseline::check_safepoint(JavaThread* thr) {
  if (SafepointSynchronize::is_synchronizing()) {
    // grab and drop the SR_lock to honor the safepoint protocol
    MutexLocker ml(thr->SR_lock());
  }
}

// baseline mmap'd memory records, generate overall summary and summaries by
// memory types
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
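  // Note: committed regions always follow the reserved region that contains
  // them, so 'index' computed for a reserved region still applies in the
  // committed (else) branch below.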
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsite; only callsites whose total
// allocation exceeds 1KB are stored.
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // baseline callsites whose total allocation exceeds 1 KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
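      // (they are reported in the per-type summary rather than by callsite)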
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }

  // deal with the last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsite
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr(const_cast<MemPointerArray*>(vm_records));
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx*     reserved_rec = NULL;
  VMMemRegionEx*     committed_rec = NULL;
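
  // 'reserved_rec' and 'committed_rec' track the map entries still open for
  // merging; 'vm_callsite' accumulates reserved/committed totals for the
  // callsite of the current reserved region.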
  // vm_ptr comes in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // The criteria for consolidation:
      //   1. the two reserved regions are adjacent
      //   2. they belong to the same memory type
      //   3. they were reserved from the same callsite
      if (reserved_rec == NULL ||
          reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
          FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
          reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // just inserted a reserved region; keep a pointer to the element in
        // the virtual memory map array so later regions can be merged into it
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // The criteria for consolidation:
      //   1. the two committed regions are adjacent
      //   2. they were committed from the same callsite
      if (committed_rec == NULL ||
          committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
          committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with the last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
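  // When two consecutive records share a callsite pc, merge the second into
  // the first and remove it; after remove() the iterator stays at the same
  // index, so current() yields the element that slid into the removed slot.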
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only is false, memory usage aggregated by
// callsite is also baselined.
// The call can be lengthy, especially when detail tracking info is
// requested, so the method checks for safepoints explicitly.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  Thread* THREAD = Thread::current();
  assert(THREAD->is_Java_thread(), "must be a JavaThread");
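  // hold the snapshot lock for the entire baseline so the snapshot cannot
  // change underneath us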
  MutexLocker snapshot_locker(snapshot._lock);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
  if (_baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
  }
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
    if (_baselined) {
      check_safepoint((JavaThread*)THREAD);
      _baselined = baseline_vm_details(snapshot._vm_ptrs);
    }
  }
  return _baselined;
}


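// map a memory type flag to its index in MemType2NameMap; asserts (and
// returns -1) if the flag is not found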
int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}


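// copy assignment: copies the summary data unconditionally and, when
// callsite tracking is on, duplicates the callsite arrays as well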
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */

// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}