/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MALLOC_TRACKER_HPP
#define SHARE_VM_SERVICES_MALLOC_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/threadCritical.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/nativeCallStack.hpp"

/*
 * This counter class tracks memory allocations and deallocations,
 * recording the total allocated size and the number of live allocations.
 * The counters are updated atomically.
 */
class MemoryCounter VALUE_OBJ_CLASS_SPEC {
 private:
  size_t   _count;
  size_t   _size;

  DEBUG_ONLY(size_t   _peak_count;)
  DEBUG_ONLY(size_t   _peak_size; )

 public:
  MemoryCounter() : _count(0), _size(0) {
    DEBUG_ONLY(_peak_count = 0;)
    DEBUG_ONLY(_peak_size  = 0;)
  }

  inline void allocate(size_t sz) {
    Atomic::add(1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_peak_size, _size);)
    }
    DEBUG_ONLY(_peak_count = MAX2(_peak_count, _count);)
  }

  inline void deallocate(size_t sz) {
    assert(_count > 0, "Negative counter");
    assert(_size >= sz, "Negative size");
    Atomic::add(-1, (volatile MemoryCounterType*)&_count);
    if (sz > 0) {
      Atomic::add(-(MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
    }
  }

  inline void resize(long sz) {
    if (sz != 0) {
      Atomic::add((MemoryCounterType)sz, (volatile MemoryCounterType*)&_size);
      DEBUG_ONLY(_peak_size = MAX2(_size, _peak_size);)
    }
  }

  inline size_t count() const { return _count; }
  inline size_t size()  const { return _size;  }
  DEBUG_ONLY(inline size_t peak_count() const { return _peak_count; })
  DEBUG_ONLY(inline size_t peak_size()  const { return _peak_size;  })
};
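
/*
 * Illustrative sketch (not part of this header): the intended life cycle of a
 * MemoryCounter. The values shown follow from the operations above.
 *
 *   MemoryCounter counter;
 *   counter.allocate(64);     // count == 1, size == 64
 *   counter.resize(32);       // the same block grew by 32 bytes; size == 96
 *   counter.deallocate(96);   // the block was freed; count == 0, size == 0
 *
 * In debug builds, peak_count() and peak_size() would report 1 and 96 here.
 */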

/*
 * Malloc memory used by a particular subsystem.
 * It includes memory acquired through os::malloc()
 * calls and arenas' backing memory.
 */
class MallocMemory VALUE_OBJ_CLASS_SPEC {
 private:
  MemoryCounter _malloc;
  MemoryCounter _arena;

 public:
  MallocMemory() { }

  inline void record_malloc(size_t sz) {
    _malloc.allocate(sz);
  }

  inline void record_free(size_t sz) {
    _malloc.deallocate(sz);
  }

  inline void record_new_arena() {
    _arena.allocate(0);
  }

  inline void record_arena_free() {
    _arena.deallocate(0);
  }

  inline void record_arena_size_change(long sz) {
    _arena.resize(sz);
  }

  inline size_t malloc_size()  const { return _malloc.size();  }
  inline size_t malloc_count() const { return _malloc.count(); }
  inline size_t arena_size()   const { return _arena.size();   }
  inline size_t arena_count()  const { return _arena.count();  }

  DEBUG_ONLY(inline const MemoryCounter& malloc_counter() const { return _malloc; })
  DEBUG_ONLY(inline const MemoryCounter& arena_counter()  const { return _arena;  })
};
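
/*
 * Illustrative sketch (not part of this header): recording malloc and arena
 * activity for one subsystem.
 *
 *   MallocMemory mem;
 *   mem.record_malloc(128);            // an os::malloc'd block of 128 bytes
 *   mem.record_new_arena();            // a new arena; size tracked separately
 *   mem.record_arena_size_change(512); // arena backing memory grew by 512 bytes
 *   mem.record_free(128);              // the malloc'd block was freed
 *
 * Afterwards: malloc_size() == 0, malloc_count() == 0,
 * arena_count() == 1 and arena_size() == 512.
 */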

class MallocMemorySummary;

// A snapshot of malloc'd memory; includes malloc memory
// usage by type and the memory used by tracking itself.
class MallocMemorySnapshot : public ResourceObj {
  friend class MallocMemorySummary;

 private:
  MallocMemory      _malloc[mt_number_of_types];
  MemoryCounter     _tracking_header;

 public:
  inline MallocMemory* by_type(MEMFLAGS flags) {
    int index = NMTUtil::flag_to_index(flags);
    return &_malloc[index];
  }

  inline MallocMemory* by_index(int index) {
    assert(index >= 0, "Index out of bounds");
    assert(index < mt_number_of_types, "Index out of bounds");
    return &_malloc[index];
  }

  inline MemoryCounter* malloc_overhead() {
    return &_tracking_header;
  }

  // Total malloc'd memory amount
  size_t total() const;
  // Total malloc'd memory used by arenas
  size_t total_arena() const;

  inline size_t thread_count() const {
    MallocMemorySnapshot* s = const_cast<MallocMemorySnapshot*>(this);
    return s->by_type(mtThreadStack)->malloc_count();
  }

  void copy_to(MallocMemorySnapshot* s) {
    // Need to make sure that mtChunks don't get deallocated while the
    // copy is going on, because their size is adjusted using this
    // buffer in make_adjustment().
    ThreadCritical tc;
    s->_tracking_header = _tracking_header;
    for (int index = 0; index < mt_number_of_types; index++) {
      s->_malloc[index] = _malloc[index];
    }
  }

  // Make adjustment by subtracting chunks used by arenas
  // from total chunks to get total free chunk size
  void make_adjustment();
};
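
/*
 * Illustrative sketch (not part of this header): reading per-type data out of
 * a snapshot. mtThread is one of the NMT memory types; tty and SIZE_FORMAT
 * come from the usual HotSpot utility headers.
 *
 *   void print_thread_malloc(MallocMemorySnapshot* s) {
 *     MallocMemory* m = s->by_type(mtThread);
 *     tty->print_cr("Thread malloc: " SIZE_FORMAT " bytes in " SIZE_FORMAT
 *                   " allocations", m->malloc_size(), m->malloc_count());
 *   }
 */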

/*
 * This class collects malloc statistics at summary level.
 */
class MallocMemorySummary : AllStatic {
 private:
  // Reserve memory for placement of the MallocMemorySnapshot object
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];

 public:
  static void initialize();

  static inline void record_malloc(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_malloc(size);
  }

  static inline void record_free(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_free(size);
  }

  static inline void record_new_arena(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_new_arena();
  }

  static inline void record_arena_free(MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_free();
  }

  static inline void record_arena_size_change(long size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->record_arena_size_change(size);
  }

  static void snapshot(MallocMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
    s->make_adjustment();
  }

  // Record memory used by malloc tracking header
  static inline void record_new_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->allocate(sz);
  }

  static inline void record_free_malloc_header(size_t sz) {
    as_snapshot()->malloc_overhead()->deallocate(sz);
  }

  // The memory used by malloc tracking headers
  static inline size_t tracking_overhead() {
    return as_snapshot()->malloc_overhead()->size();
  }

  static MallocMemorySnapshot* as_snapshot() {
    return (MallocMemorySnapshot*)_snapshot;
  }
};
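
/*
 * Illustrative sketch (not part of this header): taking a consistent snapshot
 * of the summary data. Since MallocMemorySnapshot is a ResourceObj, this
 * assumes an active ResourceMark.
 *
 *   ResourceMark rm;
 *   MallocMemorySnapshot* s = new MallocMemorySnapshot();
 *   MallocMemorySummary::snapshot(s);  // copies the counters, then adjusts chunks
 *   size_t total = s->total();         // total malloc'd memory in the snapshot
 */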

/*
 * Malloc tracking header.
 * To satisfy malloc alignment requirements, NMT uses two machine words for its
 * tracking information, which ensures 8-byte alignment on 32-bit systems and
 * 16-byte alignment on 64-bit systems (product build).
 */

class MallocHeader VALUE_OBJ_CLASS_SPEC {
#ifdef _LP64
  size_t           _size      : 64;
  size_t           _flags     : 8;
  size_t           _pos_idx   : 16;
  size_t           _bucket_idx: 40;
#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
#define MAX_BUCKET_LENGTH         right_n_bits(16)
#else
  size_t           _size      : 32;
  size_t           _flags     : 8;
  size_t           _pos_idx   : 8;
  size_t           _bucket_idx: 16;
#define MAX_MALLOCSITE_TABLE_SIZE  right_n_bits(16)
#define MAX_BUCKET_LENGTH          right_n_bits(8)
#endif  // _LP64

 public:
  MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
    assert(sizeof(MallocHeader) == sizeof(void*) * 2,
      "Wrong header size");

    if (level == NMT_minimal) {
      return;
    }

    _flags = flags;
    set_size(size);
    if (level == NMT_detail) {
      size_t bucket_idx;
      size_t pos_idx;
      if (record_malloc_site(stack, size, &bucket_idx, &pos_idx)) {
        assert(bucket_idx <= MAX_MALLOCSITE_TABLE_SIZE, "Overflow bucket index");
        assert(pos_idx <= MAX_BUCKET_LENGTH, "Overflow bucket position index");
        _bucket_idx = bucket_idx;
        _pos_idx = pos_idx;
      }
    }

    MallocMemorySummary::record_malloc(size, flags);
    MallocMemorySummary::record_new_malloc_header(sizeof(MallocHeader));
  }

  inline size_t   size()  const { return _size; }
  inline MEMFLAGS flags() const { return (MEMFLAGS)_flags; }
  bool get_stack(NativeCallStack& stack) const;

  // Clean up tracking information before the memory is released.
  void release() const;

 private:
  inline void set_size(size_t size) {
    _size = size;
  }
  bool record_malloc_site(const NativeCallStack& stack, size_t size,
    size_t* bucket_idx, size_t* pos_idx) const;
};
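
/*
 * Illustrative sketch (not part of this header): how a tracker places the
 * header in front of the user block with placement new. 'base' stands for a
 * raw block from the underlying allocator that was over-sized by
 * sizeof(MallocHeader).
 *
 *   MallocHeader* header = ::new (base) MallocHeader(size, flags, stack, level);
 *   void* memblock = (char*)base + sizeof(MallocHeader);
 *   ...
 *   // On free, walk back from the user pointer and release tracking data:
 *   MallocHeader* h = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
 *   h->release();
 */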

// Main class called from MemTracker to track malloc activities
class MallocTracker : AllStatic {
 public:
  // Initialize malloc tracker for a specific tracking level
  static bool initialize(NMT_TrackingLevel level);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

  // Malloc tracking header size for a specific tracking level
  static inline size_t malloc_header_size(NMT_TrackingLevel level) {
    return (level == NMT_off) ? 0 : sizeof(MallocHeader);
  }

  // Parameter name convention:
  // memblock :   the beginning address of the user data
  // malloc_base: the beginning address that includes the malloc tracking header
  //
  // The relationship:
  // memblock = (char*)malloc_base + sizeof(nmt header)
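  //
  // Illustratively (a sketch of the layout implied above):
  //
  //   malloc_base                        memblock
  //   |                                  |
  //   v                                  v
  //   +----------------------------------+---------------------- ...
  //   | MallocHeader (two machine words) | user data
  //   +----------------------------------+---------------------- ...
  //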

  // Record malloc on the specified memory block
  static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
    const NativeCallStack& stack, NMT_TrackingLevel level);

  // Record free on the specified memory block
  static void* record_free(void* memblock);

  // Offset memory address to header address
  static inline void* get_base(void* memblock);
  static inline void* get_base(void* memblock, NMT_TrackingLevel level) {
    if (memblock == NULL || level == NMT_off) return memblock;
    return (char*)memblock - malloc_header_size(level);
  }

  // Get memory size
  static inline size_t get_size(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->size();
  }

  // Get memory type
  static inline MEMFLAGS get_flags(void* memblock) {
    MallocHeader* header = malloc_header(memblock);
    return header->flags();
  }

  // Get header size
  static inline size_t get_header_size(void* memblock) {
    return (memblock == NULL) ? 0 : sizeof(MallocHeader);
  }

  static inline void record_new_arena(MEMFLAGS flags) {
    MallocMemorySummary::record_new_arena(flags);
  }

  static inline void record_arena_free(MEMFLAGS flags) {
    MallocMemorySummary::record_arena_free(flags);
  }

  static inline void record_arena_size_change(long size, MEMFLAGS flags) {
    MallocMemorySummary::record_arena_size_change(size, flags);
  }

 private:
  static inline MallocHeader* malloc_header(void* memblock) {
    assert(memblock != NULL, "NULL pointer");
    MallocHeader* header = (MallocHeader*)((char*)memblock - sizeof(MallocHeader));
    return header;
  }
};
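
/*
 * Illustrative sketch (not part of this header): the calling convention a
 * malloc wrapper is expected to follow, with hypothetical raw_malloc()/
 * raw_free() as stand-ins for the underlying allocator.
 *
 *   void* base = raw_malloc(size + MallocTracker::malloc_header_size(level));
 *   void* memblock = MallocTracker::record_malloc(base, size, flags, stack, level);
 *   // ... use memblock ...
 *   void* to_free = MallocTracker::record_free(memblock);
 *   raw_free(to_free);
 */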

#endif // INCLUDE_NMT


#endif // SHARE_VM_SERVICES_MALLOC_TRACKER_HPP