src/share/vm/services/memBaseline.cpp





   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"


  26 #include "services/memBaseline.hpp"
  27 #include "services/memTracker.hpp"
  28 

  29 MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  30   {mtJavaHeap,   "Java Heap"},
  31   {mtClass,      "Class"},
  32   {mtThreadStack,"Thread Stack"},
  33   {mtThread,     "Thread"},
  34   {mtCode,       "Code"},
  35   {mtGC,         "GC"},
  36   {mtCompiler,   "Compiler"},
  37   {mtInternal,   "Internal"},
  38   {mtOther,      "Other"},
  39   {mtSymbol,     "Symbol"},
  40   {mtNMT,        "Memory Tracking"},
  41   {mtTracing,    "Tracing"},
  42   {mtChunk,      "Pooled Free Chunks"},
  43   {mtClassShared,"Shared spaces for classes"},
  44   {mtTest,       "Test"},
  45   {mtNone,       "Unknown"}  // This can happen when type tagging records
  46                              // are lagging behind
  47 };
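// A minimal worked example, assuming the table order shown above:
// type2name() and flag2index() (defined later in this file) scan the map
// linearly, so type2name(mtJavaHeap) yields "Java Heap" and
// flag2index(mtJavaHeap) yields index 0.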
  48 


 133           size = next_malloc_ptr->size();
 134           _arena_data[index].inc(size);
 135           used_arena_size += size;
 136           malloc_itr.next();
 137         }
 138       }
 139     }
 140     malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
 141   }
 142 
 143   // subtract used arena size to get the size of arena chunks in the free list
 144   index = flag2index(mtChunk);
 145   _malloc_data[index].reduce(used_arena_size);
 146   // we really don't know how many chunks are in the free list, so just
 147   // set the count to 0
 148   _malloc_data[index].overwrite_counter(0);
 149 
 150   return true;
 151 }
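// A worked example with hypothetical numbers: if the mtChunk malloc counter
// had accumulated 10MB of chunk allocations and the arenas above account for
// 7MB (used_arena_size), reduce() leaves 10MB - 7MB = 3MB attributed to
// chunks sitting in the free list, while overwrite_counter(0) clears the
// chunk count because the number of free chunks is not tracked.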
 152 
 153 // baseline mmap'd memory records, generate overall summary and summaries by
 154 // memory types
 155 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
 156   MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
 157   VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
 158   int index;
 159   while (vm_ptr != NULL) {
 160     if (vm_ptr->is_reserved_region()) {
 161       index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
 162       // we use the number of thread stacks to count threads
 163       if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
 164         _number_of_threads ++;
 165       }
 166       _total_vm_reserved += vm_ptr->size();
 167       _vm_data[index].inc(vm_ptr->size(), 0);
 168     } else {
 169       _total_vm_committed += vm_ptr->size();
 170       _vm_data[index].inc(0, vm_ptr->size());
 171     }
 172     vm_ptr = (VMMemRegion*)vm_itr.next();


 328   MemPointerArrayIteratorImpl itr(_vm_cs);
 329   VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
 330   VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
 331   while (next_rec != NULL) {
 332     assert(callsite_rec != NULL, "Sanity check");
 333     if (next_rec->addr() == callsite_rec->addr()) {
 334       callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
 335       itr.remove();
 336       next_rec = (VMCallsitePointer*)itr.current();
 337     } else {
 338       callsite_rec = next_rec;
 339       next_rec = (VMCallsitePointer*)itr.next();
 340     }
 341   }
 342 
 343   return true;
 344 }
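// A minimal sketch of the consolidation above, assuming _vm_cs is sorted by
// callsite address: records {A: 1MB reserved, A: 2MB reserved, B: 4MB reserved}
// collapse to {A: 3MB, B: 4MB} -- each adjacent duplicate is folded into the
// surviving record via inc() and then removed, leaving one record per callsite.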
 345 
 346 // baseline a snapshot. If summary_only = false, memory usage aggregated by
 347 // callsites is also baselined.
 348 bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
 349   MutexLockerEx snapshot_locker(snapshot._lock, true);
 350   reset();
 351   _baselined = baseline_malloc_summary(snapshot._alloc_ptrs) &&
 352                baseline_vm_summary(snapshot._vm_ptrs);
 353   _number_of_classes = snapshot.number_of_classes();
 354 
 355   if (!summary_only && MemTracker::track_callsite() && _baselined) {
 356     _baselined =  baseline_malloc_details(snapshot._alloc_ptrs) &&
 357       baseline_vm_details(snapshot._vm_ptrs);
 358   }
 359   return _baselined;
 360 }
 361 
 362 
 363 int MemBaseline::flag2index(MEMFLAGS flag) const {
 364   for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
 365     if (MemType2NameMap[index]._flag == flag) {
 366       return index;
 367     }
 368   }
 369   assert(false, "no type");
 370   return -1;
 371 }
 372 
 373 const char* MemBaseline::type2name(MEMFLAGS type) {
 374   for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
 375     if (MemType2NameMap[index]._flag == type) {
 376       return MemType2NameMap[index]._name;
 377     }
 378   }




   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 #include "runtime/safepoint.hpp"
  27 #include "runtime/thread.hpp"
  28 #include "services/memBaseline.hpp"
  29 #include "services/memTracker.hpp"
  30 
  31 
  32 MemType2Name MemBaseline::MemType2NameMap[NUMBER_OF_MEMORY_TYPE] = {
  33   {mtJavaHeap,   "Java Heap"},
  34   {mtClass,      "Class"},
  35   {mtThreadStack,"Thread Stack"},
  36   {mtThread,     "Thread"},
  37   {mtCode,       "Code"},
  38   {mtGC,         "GC"},
  39   {mtCompiler,   "Compiler"},
  40   {mtInternal,   "Internal"},
  41   {mtOther,      "Other"},
  42   {mtSymbol,     "Symbol"},
  43   {mtNMT,        "Memory Tracking"},
  44   {mtTracing,    "Tracing"},
  45   {mtChunk,      "Pooled Free Chunks"},
  46   {mtClassShared,"Shared spaces for classes"},
  47   {mtTest,       "Test"},
  48   {mtNone,       "Unknown"}  // This can happen when type tagging records
  49                              // are lagging behind
  50 };
  51 


 136           size = next_malloc_ptr->size();
 137           _arena_data[index].inc(size);
 138           used_arena_size += size;
 139           malloc_itr.next();
 140         }
 141       }
 142     }
 143     malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
 144   }
 145 
 146   // subtract used arena size to get the size of arena chunks in the free list
 147   index = flag2index(mtChunk);
 148   _malloc_data[index].reduce(used_arena_size);
 149   // we really don't know how many chunks are in the free list, so just
 150   // set the count to 0
 151   _malloc_data[index].overwrite_counter(0);
 152 
 153   return true;
 154 }
 155 
 156 // check if there is a safepoint in progress; if so, block the thread
 157 // for the safepoint
 158 void MemBaseline::check_safepoint(JavaThread* thr) {
 159   if (SafepointSynchronize::is_synchronizing()) {
 160     // grab and drop the SR_lock to honor the safepoint protocol
 161     MutexLocker ml(thr->SR_lock());
 162   }
 163 }
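// Why grabbing SR_lock works (a sketch of the assumed mechanism): locking a
// VM mutex from a JavaThread performs a safepoint check, so briefly acquiring
// and releasing the lock lets a pending safepoint proceed and blocks this
// thread until the safepoint ends; baseline() below calls check_safepoint()
// between its lengthy phases.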
 164 
 165 // baseline mmap'd memory records, generate overall summary and summaries by
 166 // memory types
 167 bool MemBaseline::baseline_vm_summary(const MemPointerArray* vm_records) {
 168   MemPointerArrayIteratorImpl vm_itr((MemPointerArray*)vm_records);
 169   VMMemRegion* vm_ptr = (VMMemRegion*)vm_itr.current();
 170   int index;
 171   while (vm_ptr != NULL) {
 172     if (vm_ptr->is_reserved_region()) {
 173       index = flag2index(FLAGS_TO_MEMORY_TYPE(vm_ptr->flags()));
 174       // we use the number of thread stacks to count threads
 175       if (IS_MEMORY_TYPE(vm_ptr->flags(), mtThreadStack)) {
 176         _number_of_threads ++;
 177       }
 178       _total_vm_reserved += vm_ptr->size();
 179       _vm_data[index].inc(vm_ptr->size(), 0);
 180     } else {
 181       _total_vm_committed += vm_ptr->size();
 182       _vm_data[index].inc(0, vm_ptr->size());
 183     }
 184     vm_ptr = (VMMemRegion*)vm_itr.next();


 340   MemPointerArrayIteratorImpl itr(_vm_cs);
 341   VMCallsitePointer* callsite_rec = (VMCallsitePointer*)itr.current();
 342   VMCallsitePointer* next_rec = (VMCallsitePointer*)itr.next();
 343   while (next_rec != NULL) {
 344     assert(callsite_rec != NULL, "Sanity check");
 345     if (next_rec->addr() == callsite_rec->addr()) {
 346       callsite_rec->inc(next_rec->reserved_amount(), next_rec->committed_amount());
 347       itr.remove();
 348       next_rec = (VMCallsitePointer*)itr.current();
 349     } else {
 350       callsite_rec = next_rec;
 351       next_rec = (VMCallsitePointer*)itr.next();
 352     }
 353   }
 354 
 355   return true;
 356 }
 357 
 358 // baseline a snapshot. If summary_only = false, memory usage aggregated by
 359 // callsites is also baselined.
 360 // The method call can be lengthy, especially when detail tracking info is
 361 // requested, so the method checks for safepoints explicitly.
 362 bool MemBaseline::baseline(MemSnapshot& snapshot, bool summary_only) {
 363   Thread* THREAD = Thread::current();
 364   assert(THREAD->is_Java_thread(), "must be a JavaThread");
 365   MutexLocker snapshot_locker(snapshot._lock);
 366   reset();
 367   _baselined = baseline_malloc_summary(snapshot._alloc_ptrs);
 368   if (_baselined) {
 369     check_safepoint((JavaThread*)THREAD);
 370     _baselined = baseline_vm_summary(snapshot._vm_ptrs);
 371   }
 372   _number_of_classes = snapshot.number_of_classes();
 373 
 374   if (!summary_only && MemTracker::track_callsite() && _baselined) {
 375     check_safepoint((JavaThread*)THREAD);
 376     _baselined =  baseline_malloc_details(snapshot._alloc_ptrs);
 377     if (_baselined) {
 378       check_safepoint((JavaThread*)THREAD);
 379       _baselined =  baseline_vm_details(snapshot._vm_ptrs);
 380     }
 381   }
 382   return _baselined;
 383 }
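// A minimal usage sketch; the helper below is hypothetical and only exercises
// baseline() as declared in this file -- how the MemSnapshot is obtained is
// left to the caller.
static bool baseline_snapshot_summary(MemBaseline& baseline, MemSnapshot* snapshot) {
  // summary_only = true: skip the per-callsite detail pass
  return snapshot != NULL && baseline.baseline(*snapshot, true);
}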
 384 
 385 
 386 int MemBaseline::flag2index(MEMFLAGS flag) const {
 387   for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
 388     if (MemType2NameMap[index]._flag == flag) {
 389       return index;
 390     }
 391   }
 392   assert(false, "no type");
 393   return -1;
 394 }
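// For example, given the table order above, flag2index(mtGC) returns 5; a
// flag missing from MemType2NameMap trips the assert (and returns -1 where
// asserts compile away, e.g. product builds).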
 395 
 396 const char* MemBaseline::type2name(MEMFLAGS type) {
 397   for (int index = 0; index < NUMBER_OF_MEMORY_TYPE; index ++) {
 398     if (MemType2NameMap[index]._flag == type) {
 399       return MemType2NameMap[index]._name;
 400     }
 401   }