/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
#define SHARE_VM_SERVICES_MEM_PTR_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"

/*
 * Global sequence number generator; the sequence numbers are used
 * to serialize memory records.
 */
class SequenceGenerator : AllStatic {
 public:
  static jint next();

  // peek at the last sequence number generated
  static jint peek() {
    return _seq_number;
  }

  // reset sequence number
  static void reset() {
    assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
    _seq_number = 1;
    _generation++;
  }

  static unsigned long current_generation() { return _generation; }
  NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })

 private:
  static volatile jint             _seq_number;
  static volatile unsigned long    _generation;
  NOT_PRODUCT(static jint          _max_seq_number; )
};
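
/*
 * next() is defined out of line. A minimal sketch of a plausible
 * implementation, assuming Atomic::add() from runtime/atomic.hpp
 * (illustrative only; the real implementation must also cope with
 * sequence number overflow):
 *
 *   jint SequenceGenerator::next() {
 *     // atomically advance the counter and return the new value
 *     jint seq = Atomic::add(1, &_seq_number);
 *     // track the high-water mark in non-product builds
 *     NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
 *     return seq;
 *   }
 */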

/*
 * The following classes are used to hold memory activity records at different stages.
 *   MemPointer
 *     |--------MemPointerRecord
 *                     |
 *                     |----MemPointerRecordEx
 *                     |           |
 *                     |           |-------SeqMemPointerRecordEx
 *                     |
 *                     |----SeqMemPointerRecord
 *                     |
 *                     |----VMMemRegion
 *                               |
 *                               |-----VMMemRegionEx
 *
 *
 *  prefix 'Seq' - sequenced; the record contains a sequence number
 *  suffix 'Ex'  - extension; the record contains the caller's pc
 *
 *  per-thread recorder : SeqMemPointerRecord(Ex)
 *  snapshot staging    : SeqMemPointerRecord(Ex)
 *  snapshot            : MemPointerRecord(Ex) and VMMemRegion(Ex)
 *
 */

/*
 * Wrapper around the address of a memory block; the pointer
 * either points to a malloc'd memory block or to an mmap'd
 * memory block.
 */
class MemPointer : public _ValueObj {
 public:
  MemPointer(): _addr(0) { }
  MemPointer(address addr): _addr(addr) { }

  MemPointer(const MemPointer& copy_from) {
    _addr = copy_from.addr();
  }

  inline address addr() const {
    return _addr;
  }

  inline operator address() const {
    return addr();
  }

  inline bool operator == (const MemPointer& other) const {
    return addr() == other.addr();
  }

  inline MemPointer& operator = (const MemPointer& other) {
    _addr = other.addr();
    return *this;
  }

 protected:
  inline void set_addr(address addr) { _addr = addr; }

 protected:
  // memory address
  address    _addr;
};

/* MemPointerRecord records an activity and its associated
 * attributes on a memory block.
 */
class MemPointerRecord : public MemPointer {
 private:
  MEMFLAGS       _flags;
  size_t         _size;

 public:
  /* extension of the MemoryType enum;
   * see share/vm/memory/allocation.hpp for details.
   *
   * The tag values define the sorting order, so be
   * careful if changes are needed.
   * Allocation records must sort ahead of tagging
   * records, which in turn sort ahead of deallocation records.
   */
  enum MemPointerTags {
    tag_alloc            = 0x0001, // malloc or reserve record
    tag_commit           = 0x0002, // commit record
    tag_type             = 0x0003, // tag virtual memory to a memory type
    tag_uncommit         = 0x0004, // uncommit record
    tag_release          = 0x0005, // free or release record
    tag_size             = 0x0006, // arena size
    tag_masks            = 0x0007, // all tag bits
    vmBit                = 0x0008
  };
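
  /* Layout sketch (an assumption inferred from the masks above and in
   * allocation.hpp, shown for illustration only): the low three bits carry
   * the tag, bit 0x0008 is the virtual memory bit (so 'flags & 0x0F'
   * isolates both), and the object/memory type bits (ot_masks, mt_masks)
   * sit above them. For example, a virtual memory reserve record tagged
   * with the mtThread type could carry:
   *
   *   MEMFLAGS f = mtThread | tag_alloc | vmBit;
   */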

  /* helper functions to interpret the tagging flags */

  inline static bool is_allocation_record(MEMFLAGS flags) {
    return (flags & tag_masks) == tag_alloc;
  }

  inline static bool is_deallocation_record(MEMFLAGS flags) {
    return (flags & tag_masks) == tag_release;
  }

  inline static bool is_arena_record(MEMFLAGS flags) {
    return (flags & (otArena | tag_size)) == otArena;
  }

  inline static bool is_arena_memory_record(MEMFLAGS flags) {
    return (flags & (otArena | tag_size)) == (otArena | tag_size);
  }

  inline static bool is_virtual_memory_record(MEMFLAGS flags) {
    return (flags & vmBit) != 0;
  }

  inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_alloc | vmBit);
  }

  inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_commit | vmBit);
  }

  inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_uncommit | vmBit);
  }

  inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_release | vmBit);
  }

  inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_type | vmBit);
  }

  /* tagging flags */
  inline static MEMFLAGS malloc_tag()                 { return tag_alloc;   }
  inline static MEMFLAGS free_tag()                   { return tag_release; }
  inline static MEMFLAGS arena_size_tag()             { return tag_size | otArena; }
  inline static MEMFLAGS virtual_memory_tag()         { return vmBit; }
  inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
  inline static MEMFLAGS virtual_memory_commit_tag()  { return (tag_commit | vmBit); }
  inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
  inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
  inline static MEMFLAGS virtual_memory_type_tag()    { return (tag_type | vmBit); }
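
  /* A brief usage sketch (illustrative; assumes the mtThread memory type
   * from allocation.hpp): the tag helpers compose with a memory type, and
   * the predicates above recover the record kind:
   *
   *   MEMFLAGS f = mtThread | virtual_memory_reserve_tag();
   *   assert(is_virtual_memory_record(f),         "vm bit is set");
   *   assert(is_virtual_memory_reserve_record(f), "reserve record");
   *   assert(!is_deallocation_record(f),          "not a release record");
   */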

 public:
  MemPointerRecord(): _flags(mtNone), _size(0) { }

  MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
      MemPointer(addr), _flags(memflags), _size(size) { }

  MemPointerRecord(const MemPointerRecord& copy_from):
    MemPointer(copy_from), _flags(copy_from.flags()),
    _size(copy_from.size()) {
  }

  /* MemPointerRecord is not sequenced; it always returns
   * 0 to indicate a non-sequenced record.
   */
  virtual jint seq() const               { return 0; }

  inline size_t   size()  const          { return _size; }
  inline void set_size(size_t size)      { _size = size; }

  inline MEMFLAGS flags() const          { return _flags; }
  inline void set_flags(MEMFLAGS flags)  { _flags = flags; }

  MemPointerRecord& operator= (const MemPointerRecord& ptr) {
    MemPointer::operator=(ptr);
    _flags = ptr.flags();
#ifdef ASSERT
    if (IS_ARENA_OBJ(_flags)) {
      assert(!is_vm_pointer(), "wrong flags");
      assert((_flags & ot_masks) == otArena, "wrong flags");
    }
#endif
    _size = ptr.size();
    return *this;
  }

  // if the pointer represents a malloc'd memory address
  inline bool is_malloced_pointer() const {
    return !is_vm_pointer();
  }

  // if the pointer represents a virtual memory address
  inline bool is_vm_pointer() const {
    return is_virtual_memory_record(_flags);
  }

  // if this record records a 'malloc' or virtual memory
  // 'reserve' call
  inline bool is_allocation_record() const {
    return is_allocation_record(_flags);
  }

  // if this record holds size information for an arena
  inline bool is_arena_memory_record() const {
    return is_arena_memory_record(_flags);
  }

  // if this pointer represents the address of an arena object
  inline bool is_arena_record() const {
    return is_arena_record(_flags);
  }

  // if this record holds the size information of the specified arena
  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
    assert(is_arena_memory_record(), "not size record");
    assert(arena_rc->is_arena_record(), "not arena record");
    return (arena_rc->addr() + sizeof(void*)) == addr();
  }

  // if this record records a 'free' or virtual memory 'release' call
  inline bool is_deallocation_record() const {
    return is_deallocation_record(_flags);
  }

  // if this record records a virtual memory 'commit' call
  inline bool is_commit_record() const {
    return is_virtual_memory_commit_record(_flags);
  }

  // if this record records a virtual memory 'uncommit' call
  inline bool is_uncommit_record() const {
    return is_virtual_memory_uncommit_record(_flags);
  }

  // if this record is a tagging record of a virtual memory block
  inline bool is_type_tagging_record() const {
    return is_virtual_memory_type_record(_flags);
  }

  // if the two memory pointer records actually represent the same
  // memory block
  inline bool is_same_region(const MemPointerRecord* other) const {
    return (addr() == other->addr() && size() == other->size());
  }

  // if this memory region fully contains another one
  inline bool contains_region(const MemPointerRecord* other) const {
    return contains_region(other->addr(), other->size());
  }

  // if this memory region fully contains the specified memory range
  inline bool contains_region(address add, size_t sz) const {
    return (addr() <= add && addr() + size() >= add + sz);
  }

  inline bool contains_address(address add) const {
    return (addr() <= add && addr() + size() > add);
  }

  // if this memory region overlaps another region
  inline bool overlaps_region(const MemPointerRecord* other) const {
    assert(other != NULL, "Just check");
    assert(size() > 0 && other->size() > 0, "empty range");
    return contains_address(other->addr()) ||
           contains_address(other->addr() + other->size() - 1) || // end address is exclusive
           other->contains_address(addr()) ||
           other->contains_address(addr() + size() - 1); // end address is exclusive
  }
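
  /* Worked example (hypothetical addresses): for A = [0x1000, 0x1800)
   * and B = [0x1400, 0x2000), A.contains_address(0x1400) holds, so
   * A.overlaps_region(&B) is true. For adjacent regions [0x1000, 0x1800)
   * and [0x1800, 0x2000), neither region contains the other's first or
   * last byte, so overlaps_region() is false.
   */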

};

// MemPointerRecordEx additionally records the callsite pc from which
// the memory block was allocated
class MemPointerRecordEx : public MemPointerRecord {
 private:
  address      _pc;  // callsite pc

 public:
  MemPointerRecordEx(): _pc(0) { }

  MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
    MemPointerRecord(addr, memflags, size), _pc(pc) {}

  MemPointerRecordEx(const MemPointerRecordEx& copy_from):
    MemPointerRecord(copy_from), _pc(copy_from.pc()) {}

  inline address pc() const { return _pc; }

  void init(const MemPointerRecordEx* mpe) {
    MemPointerRecord::operator=(*mpe);
    _pc = mpe->pc();
  }

  void init(const MemPointerRecord* mp) {
    MemPointerRecord::operator=(*mp);
    _pc = 0;
  }
};

// a virtual memory region. The region can represent a reserved
// virtual memory region or a committed memory region
class VMMemRegion : public MemPointerRecord {
 public:
  VMMemRegion() { }

  void init(const MemPointerRecord* mp) {
    assert(mp->is_vm_pointer(), "Sanity check");
    _addr = mp->addr();
    set_size(mp->size());
    set_flags(mp->flags());
  }

  VMMemRegion& operator=(const VMMemRegion& other) {
    MemPointerRecord::operator=(other);
    return *this;
  }

  inline bool is_reserved_region() const {
    return is_allocation_record();
  }

  inline bool is_committed_region() const {
    return is_commit_record();
  }

  /* base address of this virtual memory range */
  inline address base() const {
    return addr();
  }

  /* tag this virtual memory range with the specified memory type */
  inline void tag(MEMFLAGS f) {
    set_flags(flags() | (f & mt_masks));
  }

  // expand this region to also cover the specified range.
  // The range has to be adjacent to either end of this memory region.
  void expand_region(address addr, size_t sz) {
    if (addr < base()) {
      assert(addr + sz == base(), "Sanity check");
      _addr = addr;
      set_size(size() + sz);
    } else {
      assert(base() + size() == addr, "Sanity check");
      set_size(size() + sz);
    }
  }
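
  /* Example (hypothetical addresses): a region with base 0x2000 and size
   * 0x1000 can grow downward with
   *   expand_region((address)0x1800, 0x800);
   * leaving base 0x1800 and size 0x1800. The asserts reject any range
   * that is not flush against one end of the region.
   */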

  // exclude the specified address range from this region.
  // The excluded memory range has to be on either end of this memory
  // region.
  inline void exclude_region(address add, size_t sz) {
    assert(is_reserved_region() || is_committed_region(), "Sanity check");
    assert(addr() != NULL && size() != 0, "Sanity check");
    assert(add >= addr() && add < addr() + size(), "Sanity check");
    assert(add == addr() || (add + sz) == (addr() + size()),
      "exclude in the middle");
    if (add == addr()) {
      set_addr(add + sz);
      set_size(size() - sz);
    } else {
      set_size(size() - sz);
    }
  }
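
  /* Example (hypothetical addresses): excluding the first half of a
   * region [0x1000, 0x3000) with
   *   exclude_region((address)0x1000, 0x1000);
   * leaves [0x2000, 0x3000). Carving a hole out of the middle trips the
   * "exclude in the middle" assert.
   */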
};

class VMMemRegionEx : public VMMemRegion {
 private:
  address   _pc;  // callsite pc

 public:
  VMMemRegionEx(): _pc(0) { }

  void init(const MemPointerRecordEx* mpe) {
    VMMemRegion::init(mpe);
    _pc = mpe->pc();
  }

  void init(const MemPointerRecord* mp) {
    VMMemRegion::init(mp);
    _pc = 0;
  }

  VMMemRegionEx& operator=(const VMMemRegionEx& other) {
    VMMemRegion::operator=(other);
    _pc = other.pc();
    return *this;
  }

  inline address pc() const { return _pc; }
};

/*
 * Sequenced memory record
 */
class SeqMemPointerRecord : public MemPointerRecord {
 private:
  jint _seq;  // sequence number

 public:
  SeqMemPointerRecord(): _seq(0) { }

  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size)
    : MemPointerRecord(addr, flags, size) {
    _seq = SequenceGenerator::next();
  }

  SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
    : MemPointerRecord(copy_from) {
    _seq = copy_from.seq();
  }

  SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
    MemPointerRecord::operator=(ptr);
    _seq = ptr.seq();
    return *this;
  }

  inline jint seq() const {
    return _seq;
  }
};

class SeqMemPointerRecordEx : public MemPointerRecordEx {
 private:
  jint    _seq;  // sequence number

 public:
  SeqMemPointerRecordEx(): _seq(0) { }

  SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
    address pc): MemPointerRecordEx(addr, flags, size, pc) {
    _seq = SequenceGenerator::next();
  }

  SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
    : MemPointerRecordEx(copy_from) {
    _seq = copy_from.seq();
  }

  SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
    MemPointerRecordEx::operator=(ptr);
    _seq = ptr.seq();
    return *this;
  }

  inline jint seq() const {
    return _seq;
  }
};

#endif // SHARE_VM_SERVICES_MEM_PTR_HPP