/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
#include "services/memBaseline.hpp"
#include "services/memTracker.hpp"

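// Map each memory type flag to the human-readable name used in NMT reports.
// The table order defines the index returned by flag2index() and used by the
// per-type counters (_malloc_data, _vm_data and _arena_data).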
MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  {mtJavaHeap,   "Java Heap"},
  {mtClass,      "Class"},
  {mtThreadStack,"Thread Stack"},
  {mtThread,     "Thread"},
  {mtCode,       "Code"},
  {mtGC,         "GC"},
  {mtCompiler,   "Compiler"},
  {mtInternal,   "Internal"},
  {mtOther,      "Other"},
  {mtSymbol,     "Symbol"},
  {mtNMT,        "Memory Tracking"},
  {mtTracing,    "Tracing"},
  {mtChunk,      "Pooled Free Chunks"},
  {mtClassShared,"Shared spaces for classes"},
  {mtTest,       "Test"},
  {mtNone,       "Unknown"}  // This can happen when type tagging records lag
                             // behind the allocation records
};

MemBaseline::MemBaseline() {
  _baselined = false;

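  // tag each per-type counter with its memory type so report code can label it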
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].set_type(MemType2NameMap[index]._flag);
    _vm_data[index].set_type(MemType2NameMap[index]._flag);
    _arena_data[index].set_type(MemType2NameMap[index]._flag);
  }

  _malloc_cs = NULL;
  _vm_cs = NULL;
  _vm_map = NULL;

  _number_of_classes = 0;
  _number_of_threads = 0;
}


void MemBaseline::clear() {
  if (_malloc_cs != NULL) {
    delete _malloc_cs;
    _malloc_cs = NULL;
  }

  if (_vm_cs != NULL) {
    delete _vm_cs;
    _vm_cs = NULL;
  }

  if (_vm_map != NULL) {
    delete _vm_map;
    _vm_map = NULL;
  }

  reset();
}


void MemBaseline::reset() {
  _baselined = false;
  _total_vm_reserved = 0;
  _total_vm_committed = 0;
  _total_malloced = 0;
  _number_of_classes = 0;

  if (_malloc_cs != NULL) _malloc_cs->clear();
  if (_vm_cs != NULL) _vm_cs->clear();
  if (_vm_map != NULL) _vm_map->clear();

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index].clear();
    _vm_data[index].clear();
    _arena_data[index].clear();
  }
}

MemBaseline::~MemBaseline() {
  clear();
}

// baseline malloc'd memory records; generate the overall summary and the
// per-memory-type summaries
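// Records are walked in memory address order; an arena's size record (arena
// memory record), when present, immediately follows its owning arena record.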
bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records) {
  MemPointerArrayIteratorImpl malloc_itr((MemPointerArray*)malloc_records);
  MemPointerRecord* malloc_ptr = (MemPointerRecord*)malloc_itr.current();
  size_t used_arena_size = 0;
  int index;
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
    if (malloc_ptr->is_arena_memory_record()) {
      // We do have anonymous arenas; they are either used as value objects,
      // which are embedded inside other objects, or used as stack objects.
      _arena_data[index].inc(size);
      used_arena_size += size;
    } else {
      _total_malloced += size;
      _malloc_data[index].inc(size);
      if (malloc_ptr->is_arena_record()) {
        // see if the arena's memory record is present
        MemPointerRecord* next_malloc_ptr = (MemPointerRecord*)malloc_itr.peek_next();
        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
             "Arena records do not match");
          size = next_malloc_ptr->size();
          _arena_data[index].inc(size);
          used_arena_size += size;
          malloc_itr.next();
        }
      }
    }
    malloc_ptr = (MemPointerRecord*)malloc_itr.next();
  }

  // subtract the used arena size to get the size of arena chunks on the free list
  index = flag2index(mtChunk);
  _malloc_data[index].reduce(used_arena_size);
  // we don't know how many chunks are on the free list, so just set the
  // counter to 0
  _malloc_data[index].overwrite_counter(0);

  return true;
}

// check if there is a safepoint in progress; if so, block the thread
// for the safepoint
void MemBaseline::check_safepoint(JavaThread* thr) {
  if (SafepointSynchronize::is_synchronizing()) {
    // grab and drop the SR_lock to honor the safepoint protocol
    MutexLocker ml(thr->SR_lock());
  }
}

// baseline mmap'd memory records; generate the overall summary and the
// per-memory-type summaries
bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
  int index;
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
      // we use the number of thread stacks to count threads
      if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
        _number_of_threads ++;
      }
      _total_vm_reserved += vm_ptr->size();
      _vm_data[index].inc(vm_ptr->size(), 0);
    } else {
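      // committed regions follow the reserved region they belong to (records
      // arrive in base address order), so 'index' computed above still applies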
      _total_vm_committed += vm_ptr->size();
      _vm_data[index].inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegion*)vm_itr.next();
  }
  return true;
}

// baseline malloc'd memory by callsite; only callsites with more than 1KB of
// allocated memory are stored
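// Strategy: sort the malloc records into callsite pc order, sum sizes of
// consecutive records with the same pc, keep only callsites totaling over
// 1KB, then restore the snapshot to address order.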
bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  MemPointerArrayIteratorImpl malloc_itr(const_cast<MemPointerArray*>(malloc_records));
  MemPointerRecordEx* malloc_ptr = (MemPointerRecordEx*)malloc_itr.current();
  MallocCallsitePointer malloc_callsite;

  // initialize malloc callsite array
  if (_malloc_cs == NULL) {
    _malloc_cs = new (std::nothrow) MemPointerArrayImpl<MallocCallsitePointer>(64);
    // out of native memory
    if (_malloc_cs == NULL || _malloc_cs->out_of_memory()) {
      return false;
    }
  } else {
    _malloc_cs->clear();
  }

  MemPointerArray* malloc_data = const_cast<MemPointerArray*>(malloc_records);

  // sort into callsite pc order. Details are aggregated by callsites
  malloc_data->sort((FN_SORT)malloc_sort_by_pc);
  bool ret = true;

  // only baseline callsites whose total allocation exceeds 1 KB
  while (malloc_ptr != NULL) {
    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
          if ((malloc_callsite.amount()/K) > 0) {
            if (!_malloc_cs->append(&malloc_callsite)) {
              ret = false;
              break;
            }
          }
          malloc_callsite = MallocCallsitePointer(malloc_ptr->pc());
        }
        malloc_callsite.inc(malloc_ptr->size());
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
  }

  // restore to address order. Snapshot malloc data is maintained in memory
  // address order.
  malloc_data->sort((FN_SORT)malloc_sort_by_addr);

  if (!ret) {
    return false;
  }
  // deal with the last record
  if (malloc_callsite.addr() != 0 && (malloc_callsite.amount()/K) > 0) {
    if (!_malloc_cs->append(&malloc_callsite)) {
      return false;
    }
  }
  return true;
}

// baseline mmap'd memory by callsites
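// Strategy: walk the regions in base address order, consolidating adjacent
// regions into the virtual memory map and accumulating reserved/committed
// amounts per callsite, then sort the callsites by pc and merge duplicates.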
bool MemBaseline::baseline_vm_details(const MemPointerArray* vm_records) {
  assert(MemTracker::track_callsite(), "detail tracking is off");

  VMCallsitePointer  vm_callsite;
  VMCallsitePointer* cur_callsite = NULL;
  MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
  VMMemRegionEx* vm_ptr = (VMMemRegionEx*)vm_itr.current();

  // initialize virtual memory map array
  if (_vm_map == NULL) {
    _vm_map = new (std::nothrow) MemPointerArrayImpl<VMMemRegionEx>(vm_records->length());
    if (_vm_map == NULL || _vm_map->out_of_memory()) {
      return false;
    }
  } else {
    _vm_map->clear();
  }

  // initialize virtual memory callsite array
  if (_vm_cs == NULL) {
    _vm_cs = new (std::nothrow) MemPointerArrayImpl<VMCallsitePointer>(64);
    if (_vm_cs == NULL || _vm_cs->out_of_memory()) {
      return false;
    }
  } else {
    _vm_cs->clear();
  }

  // consolidate virtual memory data
  VMMemRegionEx*     reserved_rec = NULL;
  VMMemRegionEx*     committed_rec = NULL;

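  // reserved_rec and committed_rec track the most recently appended map
  // entries, so adjacent regions can be merged into them in place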
  // vm_ptr records arrive in increasing base address order
  while (vm_ptr != NULL) {
    if (vm_ptr->is_reserved_region()) {
      // consolidate reserved memory regions for the virtual memory map.
      // The criteria for consolidation are:
      // 1. two adjacent reserved memory regions
      // 2. belong to the same memory type
      // 3. reserved from the same callsite
      if (reserved_rec == NULL ||
        reserved_rec->base() + reserved_rec->size() != vm_ptr->addr() ||
        FLAGS_TO_MEMORY_TYPE(reserved_rec->flags()) != FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()) ||
        reserved_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        // just inserted a reserved region; we need the pointer to the element
        // in the virtual memory map array
        reserved_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        reserved_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }

      if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
        return false;
      }
      vm_callsite = VMCallsitePointer(vm_ptr->pc());
      cur_callsite = &vm_callsite;
      vm_callsite.inc(vm_ptr->size(), 0);
    } else {
      // consolidate committed memory regions for the virtual memory map.
      // The criteria are:
      // 1. two adjacent committed memory regions
      // 2. committed from the same callsite
      if (committed_rec == NULL ||
        committed_rec->base() + committed_rec->size() != vm_ptr->addr() ||
        committed_rec->pc() != vm_ptr->pc()) {
        if (!_vm_map->append(vm_ptr)) {
          return false;
        }
        committed_rec = (VMMemRegionEx*)_vm_map->at(_vm_map->length() - 1);
      } else {
        committed_rec->expand_region(vm_ptr->addr(), vm_ptr->size());
      }
      vm_callsite.inc(0, vm_ptr->size());
    }
    vm_ptr = (VMMemRegionEx*)vm_itr.next();
  }
  // deal with the last record
  if (cur_callsite != NULL && !_vm_cs->append(cur_callsite)) {
    return false;
  }

  // sort into callsite pc order. Details are aggregated by callsites
  _vm_cs->sort((FN_SORT)bl_vm_sort_by_pc);

  // walk the array to consolidate records by pc
  MemPointerArrayIteratorImpl itr(_vm_cs);
  VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
  VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
  while (next_rec != NULL) {
    assert(callsite_rec != NULL, "Sanity check");
    if (next_rec->addr() == callsite_rec->addr()) {
      callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
      itr.remove();
      next_rec = (VMCallsitePointer*)itr.current();
    } else {
      callsite_rec = next_rec;
      next_rec = (VMCallsitePointer*)itr.next();
    }
  }

  return true;
}

// baseline a snapshot. If summary_only is false, memory usage aggregated by
// callsite is also baselined.
// The method call can be lengthy, especially when detail tracking info is
// requested, so the method checks for safepoints explicitly.
bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
  Thread* THREAD = Thread::current();
  assert(THREAD->is_Java_thread(), "must be a JavaThread");
  MutexLocker snapshot_locker(snapshot._lock);
  reset();
  _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
  if (_baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_vm_summary(snapshot._vm_ptrs);
  }
  _number_of_classes = snapshot.number_of_classes();

  if (!summary_only && MemTracker::track_callsite() && _baselined) {
    check_safepoint((JavaThread*)THREAD);
    _baselined = baseline_malloc_details(snapshot._alloc_ptrs);
    if (_baselined) {
      check_safepoint((JavaThread*)THREAD);
      _baselined = baseline_vm_details(snapshot._vm_ptrs);
    }
  }
  return _baselined;
}


int MemBaseline::flag2index(MEMFLAGS flag) const {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == flag) {
      return index;
    }
  }
  assert(false, "no type");
  return -1;
}

const char* MemBaseline::type2name(MEMFLAGS type) {
  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    if (MemType2NameMap[index]._flag == type) {
      return MemType2NameMap[index]._name;
    }
  }
  assert(false, err_msg("bad type %x", type));
  return NULL;
}


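// Copy summary data and, when callsite tracking is on, the per-callsite
// arrays. The destination arrays must already exist (allocated by a prior
// detail baseline); the virtual memory map itself is not copied.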
MemBaseline& MemBaseline::operator=(const MemBaseline& other) {
  _total_malloced = other._total_malloced;
  _total_vm_reserved = other._total_vm_reserved;
  _total_vm_committed = other._total_vm_committed;

  _baselined = other._baselined;
  _number_of_classes = other._number_of_classes;

  for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
    _malloc_data[index] = other._malloc_data[index];
    _vm_data[index] = other._vm_data[index];
    _arena_data[index] = other._arena_data[index];
  }

  if (MemTracker::track_callsite()) {
    assert(_malloc_cs != NULL && _vm_cs != NULL, "out of memory");
    assert(other._malloc_cs != NULL && other._vm_cs != NULL,
           "not properly baselined");
    _malloc_cs->clear();
    _vm_cs->clear();
    int index;
    for (index = 0; index < other._malloc_cs->length(); index ++) {
      _malloc_cs->append(other._malloc_cs->at(index));
    }

    for (index = 0; index < other._vm_cs->length(); index ++) {
      _vm_cs->append(other._vm_cs->at(index));
    }
  }
  return *this;
}

/* compare functions for sorting */

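// Note: the size-ordered comparators compare (p2, p1) so the sort is
// descending by size; the pc/address comparators sort ascending.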
// sort snapshot malloc'd records in callsite pc order
int MemBaseline::malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::track_callsite(), "Just check");
  const MemPointerRecordEx* mp1 = (const MemPointerRecordEx*)p1;
  const MemPointerRecordEx* mp2 = (const MemPointerRecordEx*)p2;
  return UNSIGNED_COMPARE(mp1->pc(), mp2->pc());
}

// sort baselined malloc'd records in size order
int MemBaseline::bl_malloc_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->amount(), mp1->amount());
}

// sort baselined malloc'd records in callsite pc order
int MemBaseline::bl_malloc_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MallocCallsitePointer* mp1 = (const MallocCallsitePointer*)p1;
  const MallocCallsitePointer* mp2 = (const MallocCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort baselined mmap'd records in size (reserved size) order
int MemBaseline::bl_vm_sort_by_size(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp2->reserved_amount(), mp1->reserved_amount());
}

// sort baselined mmap'd records in callsite pc order
int MemBaseline::bl_vm_sort_by_pc(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const VMCallsitePointer* mp1 = (const VMCallsitePointer*)p1;
  const VMCallsitePointer* mp2 = (const VMCallsitePointer*)p2;
  return UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
}


// sort snapshot malloc'd records in memory block address order
int MemBaseline::malloc_sort_by_addr(const void* p1, const void* p2) {
  assert(MemTracker::is_on(), "Just check");
  const MemPointerRecord* mp1 = (const MemPointerRecord*)p1;
  const MemPointerRecord* mp2 = (const MemPointerRecord*)p2;
  int delta = UNSIGNED_COMPARE(mp1->addr(), mp2->addr());
  assert(delta != 0, "dup pointer");
  return delta;
}