/*
 * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP
#define SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP

#if INCLUDE_NMT

#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "services/allocationSite.hpp"
#include "services/nmtCommon.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"


/*
 * Virtual memory counter
 */
class VirtualMemory VALUE_OBJ_CLASS_SPEC {
 private:
  size_t     _reserved;
  size_t     _committed;

 public:
  VirtualMemory() : _reserved(0), _committed(0) { }

  inline void reserve_memory(size_t sz) { _reserved += sz; }
  inline void commit_memory (size_t sz) {
    _committed += sz;
    assert(_committed <= _reserved, "Sanity check");
  }

  inline void release_memory (size_t sz) {
    assert(_reserved >= sz, "Negative amount");
    _reserved -= sz;
  }

  inline void uncommit_memory(size_t sz) {
    assert(_committed >= sz, "Negative amount");
    _committed -= sz;
  }

  inline size_t reserved()  const { return _reserved;  }
  inline size_t committed() const { return _committed; }
};
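
// Illustrative use of the counter above (a sketch, not part of the tracker API;
// committed memory may never exceed reserved memory):
//
//   VirtualMemory vm;
//   vm.reserve_memory(4096);    // reserved = 4096, committed = 0
//   vm.commit_memory(1024);     // reserved = 4096, committed = 1024
//   vm.uncommit_memory(1024);   // committed back to 0
//   vm.release_memory(4096);    // reserved back to 0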

// Virtual memory allocation site; keeps track of where the virtual memory is reserved.
class VirtualMemoryAllocationSite : public AllocationSite<VirtualMemory> {
 public:
  VirtualMemoryAllocationSite(const NativeCallStack& stack) :
    AllocationSite<VirtualMemory>(stack) { }

  inline void reserve_memory(size_t sz)  { data()->reserve_memory(sz);  }
  inline void commit_memory (size_t sz)  { data()->commit_memory(sz);   }
  inline void uncommit_memory(size_t sz) { data()->uncommit_memory(sz); }
  inline void release_memory(size_t sz)  { data()->release_memory(sz);  }
  inline size_t reserved() const  { return peek()->reserved(); }
  inline size_t committed() const { return peek()->committed(); }
};

class VirtualMemorySummary;

// This class represents a snapshot of virtual memory at a given time.
// The latest snapshot is saved in a static area.
class VirtualMemorySnapshot : public ResourceObj {
  friend class VirtualMemorySummary;

 private:
  VirtualMemory  _virtual_memory[mt_number_of_types];

 public:
  inline VirtualMemory* by_type(MEMFLAGS flag) {
    int index = NMTUtil::flag_to_index(flag);
    return &_virtual_memory[index];
  }
  inline VirtualMemory* by_index(int index) {
    assert(index >= 0, "Index out of bounds");
    assert(index < mt_number_of_types, "Index out of bounds");
    return &_virtual_memory[index];
  }

  inline size_t total_reserved() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].reserved();
    }
    return amount;
  }

  inline size_t total_committed() const {
    size_t amount = 0;
    for (int index = 0; index < mt_number_of_types; index ++) {
      amount += _virtual_memory[index].committed();
    }
    return amount;
  }

  void copy_to(VirtualMemorySnapshot* s) {
    for (int index = 0; index < mt_number_of_types; index ++) {
      s->_virtual_memory[index] = _virtual_memory[index];
    }
  }
};
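
// Sketch of taking and reading a snapshot (illustrative only; mtThreadStack is
// one of the NMT memory type flags):
//
//   VirtualMemorySnapshot snapshot;
//   VirtualMemorySummary::snapshot(&snapshot);
//   size_t stack_reserved = snapshot.by_type(mtThreadStack)->reserved();
//   size_t all_committed  = snapshot.total_committed();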

class VirtualMemorySummary : AllStatic {
 public:
  static void initialize();

  static inline void record_reserved_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->reserve_memory(size);
  }

  static inline void record_committed_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->commit_memory(size);
  }

  static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->uncommit_memory(size);
  }

  static inline void record_released_memory(size_t size, MEMFLAGS flag) {
    as_snapshot()->by_type(flag)->release_memory(size);
  }

  // Move virtual memory from one memory type to another.
  // Virtual memory can be reserved before it is associated with a memory type, in which
  // case it is tagged as 'unknown'. Once the owner is known, the memory is moved from
  // the 'unknown' type to the specified memory type.
  static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->release_memory(size);
    as_snapshot()->by_type(to)->reserve_memory(size);
  }

  static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) {
    as_snapshot()->by_type(from)->uncommit_memory(size);
    as_snapshot()->by_type(to)->commit_memory(size);
  }

  static inline void snapshot(VirtualMemorySnapshot* s) {
    as_snapshot()->copy_to(s);
  }

  static VirtualMemorySnapshot* as_snapshot() {
    return (VirtualMemorySnapshot*)_snapshot;
  }

 private:
  static size_t _snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
};
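
// Illustrative call sequence against the summary above (a sketch; memory is
// often reserved before its owner is known, then re-tagged via the move_*
// functions; mtNone and mtJavaHeap are existing NMT memory type flags):
//
//   VirtualMemorySummary::record_reserved_memory(sz, mtNone);
//   VirtualMemorySummary::record_committed_memory(sz, mtNone);
//   // Once the owner is identified, move both counters to the real type:
//   VirtualMemorySummary::move_reserved_memory(mtNone, mtJavaHeap, sz);
//   VirtualMemorySummary::move_committed_memory(mtNone, mtJavaHeap, sz);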


/*
 * A virtual memory region
 */
class VirtualMemoryRegion VALUE_OBJ_CLASS_SPEC {
 private:
  address      _base_address;
  size_t       _size;

 public:
  VirtualMemoryRegion(address addr, size_t size) :
    _base_address(addr), _size(size) {
     assert(addr != NULL, "Invalid address");
     assert(size > 0, "Invalid size");
   }

  inline address base() const { return _base_address;   }
  inline address end()  const { return base() + size(); }
  inline size_t  size() const { return _size;           }

  inline bool is_empty() const { return size() == 0; }

  inline bool contain_address(address addr) const {
    return (addr >= base() && addr < end());
  }

  inline bool contain_region(address addr, size_t size) const {
    return contain_address(addr) && contain_address(addr + size - 1);
  }

  inline bool same_region(address addr, size_t sz) const {
    return (addr == base() && sz == size());
  }

  inline bool overlap_region(address addr, size_t sz) const {
    VirtualMemoryRegion rgn(addr, sz);
    return contain_address(addr) ||
           contain_address(addr + sz - 1) ||
           rgn.contain_address(base()) ||
           rgn.contain_address(end() - 1);
  }

  inline bool adjacent_to(address addr, size_t sz) const {
    return (addr == end() || (addr + sz) == base());
  }

  // Trim the region at either end; carving a hole out of the middle is not supported.
  void exclude_region(address addr, size_t sz) {
    assert(contain_region(addr, sz), "Region not contained");
    assert(addr == base() || addr + sz == end(), "Cannot exclude from the middle");
    size_t new_size = size() - sz;

    if (addr == base()) {
      set_base(addr + sz);
    }
    set_size(new_size);
  }

  void expand_region(address addr, size_t sz) {
    assert(adjacent_to(addr, sz), "Not adjacent regions");
    if (base() == addr + sz) {
      set_base(addr);
    }
    set_size(size() + sz);
  }

 protected:
  void set_base(address base) {
    assert(base != NULL, "Sanity check");
    _base_address = base;
  }

  void set_size(size_t  size) {
    assert(size > 0, "Sanity check");
    _size = size;
  }
};
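
// Worked example of the region arithmetic above (illustrative addresses;
// regions can only be trimmed or grown at their ends, never in the middle):
//
//   VirtualMemoryRegion rgn((address)0x1000, 0x4000);  // covers  [0x1000, 0x5000)
//   rgn.exclude_region((address)0x1000, 0x1000);       // now     [0x2000, 0x5000)
//   rgn.expand_region ((address)0x1000, 0x1000);       // back to [0x1000, 0x5000)
//   rgn.exclude_region((address)0x4000, 0x1000);       // now     [0x1000, 0x4000)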


class CommittedMemoryRegion : public VirtualMemoryRegion {
 private:
  NativeCallStack  _stack;

 public:
  CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
    VirtualMemoryRegion(addr, size), _stack(stack) { }

  // Overlapping regions compare as equal; otherwise order by base address.
  inline int compare(const CommittedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const CommittedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const         { return &_stack; }
};


typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;

int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
class ReservedMemoryRegion : public VirtualMemoryRegion {
 private:
  SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
    _committed_regions;

  NativeCallStack  _stack;
  MEMFLAGS         _flag;

  bool             _all_committed;

 public:
  ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone) :
    VirtualMemoryRegion(base, size), _stack(stack), _flag(flag),
    _all_committed(false) { }


  ReservedMemoryRegion(address base, size_t size) :
    VirtualMemoryRegion(base, size), _stack(NativeCallStack::EMPTY_STACK), _flag(mtNone),
    _all_committed(false) { }

  // Copy constructor
  ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
    VirtualMemoryRegion(rr.base(), rr.size()) {
    *this = rr;
  }

  inline void  set_call_stack(const NativeCallStack& stack) { _stack = stack; }
  inline const NativeCallStack* call_stack() const          { return &_stack;  }

  void  set_flag(MEMFLAGS flag);
  inline MEMFLAGS flag() const            { return _flag;  }

  // Overlapping regions compare as equal; otherwise order by base address.
  inline int compare(const ReservedMemoryRegion& rgn) const {
    if (overlap_region(rgn.base(), rgn.size())) {
      return 0;
    } else {
      if (base() == rgn.base()) {
        return 0;
      } else if (base() > rgn.base()) {
        return 1;
      } else {
        return -1;
      }
    }
  }

  inline bool equals(const ReservedMemoryRegion& rgn) const {
    return compare(rgn) == 0;
  }

  bool    add_committed_region(address addr, size_t size, const NativeCallStack& stack);
  bool    remove_uncommitted_region(address addr, size_t size);

  size_t  committed_size() const;

  // Move committed regions that are higher than the specified address to
  // the new region.
  void    move_committed_regions(address addr, ReservedMemoryRegion& rgn);

  inline bool all_committed() const { return _all_committed; }
  void        set_all_committed(bool b);

  CommittedRegionIterator iterate_committed_regions() const {
    return CommittedRegionIterator(_committed_regions.head());
  }

  ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
    set_base(other.base());
    set_size(other.size());

    _stack =         *other.call_stack();
    _flag  =         other.flag();
    _all_committed = other.all_committed();
    if (other.all_committed()) {
      set_all_committed(true);
    } else {
      CommittedRegionIterator itr = other.iterate_committed_regions();
      const CommittedMemoryRegion* rgn = itr.next();
      while (rgn != NULL) {
        _committed_regions.add(*rgn);
        rgn = itr.next();
      }
    }
    return *this;
  }

 private:
  // The committed region contains the uncommitted region; subtract the uncommitted
  // region from this committed region.
  bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
    address addr, size_t sz);

  bool add_committed_region(const CommittedMemoryRegion& rgn) {
    assert(rgn.base() != NULL, "Invalid base address");
    assert(size() > 0, "Invalid size");
    return _committed_regions.add(rgn) != NULL;
  }
};
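
// Sketch of how a reserved region accumulates committed pieces (illustrative;
// 'stack' stands for a NativeCallStack captured at the call site, and the
// committed_size() values show the expected bookkeeping):
//
//   ReservedMemoryRegion rgn((address)0x1000, 0x4000, stack, mtThread);
//   rgn.add_committed_region((address)0x1000, 0x1000, stack);
//   rgn.add_committed_region((address)0x3000, 0x1000, stack);
//   // committed_size() == 0x2000; the two disjoint pieces stay on the sorted list
//   rgn.remove_uncommitted_region((address)0x1000, 0x1000);   // committed_size() == 0x1000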

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);

class VirtualMemoryWalker : public StackObj {
 public:
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
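
// Sketch of a concrete walker (illustrative subclass; the name
// ReservedSizeWalker is hypothetical):
//
//   class ReservedSizeWalker : public VirtualMemoryWalker {
//    public:
//     size_t _total;
//     ReservedSizeWalker() : _total(0) { }
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       _total += rgn->size();
//       return true;   // returning false stops the walk
//     }
//   };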

// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
 public:
  static bool initialize(NMT_TrackingLevel level);

  // Late phase initialization
  static bool late_initialize(NMT_TrackingLevel level);

  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack,
    MEMFLAGS flag = mtNone, bool all_committed = false);

  static bool add_committed_region      (address base_addr, size_t size, const NativeCallStack& stack);
  static bool remove_uncommitted_region (address base_addr, size_t size);
  static bool remove_released_region    (address base_addr, size_t size);
  static void set_reserved_region_type  (address addr, MEMFLAGS flag);

  // Walk virtual memory data structure for creating baseline, etc.
  static bool walk_virtual_memory(VirtualMemoryWalker* walker);

  static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);

 private:
  static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
};
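
// Illustrative life cycle of one tracked mapping, as driven by MemTracker
// (a sketch of the expected call order, not a verbatim trace; base, size,
// page_sz and stack are placeholders):
//
//   VirtualMemoryTracker::add_reserved_region(base, size, stack);      // reserve
//   VirtualMemoryTracker::set_reserved_region_type(base, mtGC);        // owner known
//   VirtualMemoryTracker::add_committed_region(base, page_sz, stack);  // commit
//   VirtualMemoryTracker::remove_uncommitted_region(base, page_sz);    // uncommit
//   VirtualMemoryTracker::remove_released_region(base, size);          // release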


class MetaspaceSnapshot : public ResourceObj {
private:
  size_t  _reserved_in_bytes[Metaspace::MetadataTypeCount];
  size_t  _committed_in_bytes[Metaspace::MetadataTypeCount];
  size_t  _used_in_bytes[Metaspace::MetadataTypeCount];
  size_t  _free_in_bytes[Metaspace::MetadataTypeCount];

public:
  MetaspaceSnapshot();
  size_t reserved_in_bytes(Metaspace::MetadataType type)   const { assert_valid_metadata_type(type); return _reserved_in_bytes[type]; }
  size_t committed_in_bytes(Metaspace::MetadataType type)  const { assert_valid_metadata_type(type); return _committed_in_bytes[type]; }
  size_t used_in_bytes(Metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _used_in_bytes[type]; }
  size_t free_in_bytes(Metaspace::MetadataType type)       const { assert_valid_metadata_type(type); return _free_in_bytes[type]; }

  static void snapshot(MetaspaceSnapshot& s);

private:
  static void snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& s);

  static void assert_valid_metadata_type(Metaspace::MetadataType type) {
    assert(type == Metaspace::ClassType || type == Metaspace::NonClassType,
      "Invalid metadata type");
  }
};
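
// Sketch of taking and reading a metaspace snapshot (illustrative):
//
//   MetaspaceSnapshot ms;
//   MetaspaceSnapshot::snapshot(ms);
//   size_t class_committed = ms.committed_in_bytes(Metaspace::ClassType);
//   size_t nonclass_used   = ms.used_in_bytes(Metaspace::NonClassType);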

#endif // INCLUDE_NMT

#endif // SHARE_VM_SERVICES_VIRTUAL_MEMORY_TRACKER_HPP