/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_SERVICES_MEM_PTR_HPP
#define SHARE_VM_SERVICES_MEM_PTR_HPP

#include "memory/allocation.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
/*
 * Global sequence generator that generates sequence numbers to serialize
 * memory records.
 */
class SequenceGenerator : AllStatic {
 public:
  static jint next();

  // peek last sequence number
  static jint peek() {
    return _seq_number;
  }

  // reset sequence number
  static void reset() {
    assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
    _seq_number = 1;
    _generation++;
  }

  static unsigned long current_generation() { return _generation; }
  NOT_PRODUCT(static jint max_seq_num() { return _max_seq_number; })

 private:
  static volatile jint             _seq_number;
  static volatile unsigned long    _generation;
  NOT_PRODUCT(static jint          _max_seq_number; )
};
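
// Illustrative sketch (not part of the original header): how a recorder
// might stamp records and how reset() interacts with generations. Assumes
// next() atomically increments _seq_number, as implemented in memPtr.cpp.
//
//   jint seq = SequenceGenerator::next();   // e.g. 1, 2, 3, ...
//   SeqMemPointerRecord rec(addr, flags, size, seq);
//   ...
//   // at a safepoint, once the snapshot has consumed all pending records:
//   SequenceGenerator::reset();             // seq restarts at 1 and
//                                           // current_generation() advances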

/*
 * The following classes hold memory activity records at different stages.
 *   MemPointer
 *     |--------MemPointerRecord
 *                     |
 *                     |----MemPointerRecordEx
 *                     |           |
 *                     |           |-------SeqMemPointerRecordEx
 *                     |
 *                     |----SeqMemPointerRecord
 *                     |
 *                     |----VMMemRegion
 *                               |
 *                               |-----VMMemRegionEx
 *
 *
 *  prefix 'Seq' - sequenced, the record contains a sequence number
 *  suffix 'Ex'  - extension, the record contains the caller's pc
 *
 *  per-thread recorder : SeqMemPointerRecord(Ex)
 *  snapshot staging    : SeqMemPointerRecord(Ex)
 *  snapshot            : MemPointerRecord(Ex) and VMMemRegion(Ex)
 *
 */

/*
 * Class that wraps the address of a memory block. The pointer
 * refers either to a malloc'd memory block or to an mmap'd
 * memory block.
 */
class MemPointer VALUE_OBJ_CLASS_SPEC {
 public:
  MemPointer(): _addr(0) { }
  MemPointer(address addr): _addr(addr) { }

  MemPointer(const MemPointer& copy_from) {
    _addr = copy_from.addr();
  }

  inline address addr() const {
    return _addr;
  }

  inline operator address() const {
    return addr();
  }

  inline bool operator == (const MemPointer& other) const {
    return addr() == other.addr();
  }

  inline MemPointer& operator = (const MemPointer& other) {
    _addr = other.addr();
    return *this;
  }

 protected:
  inline void set_addr(address addr) { _addr = addr; }

 protected:
  // memory address
  address    _addr;
};
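
// Illustrative sketch (not part of the original header): MemPointer has
// value semantics and converts implicitly to 'address', so records can be
// compared and sorted by raw address.
//
//   MemPointer p1((address)0x1000);
//   MemPointer p2((address)0x2000);
//   assert(p1 == MemPointer((address)0x1000), "equal by address");
//   assert((address)p1 < (address)p2, "ordered by address");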

/* MemPointerRecord records an activity and its associated
 * attributes on a memory block.
 */
class MemPointerRecord : public MemPointer {
 private:
  MEMFLAGS       _flags;
  size_t         _size;

 public:
  /* extension of MemoryType enum
   * see share/vm/memory/allocation.hpp for details.
   *
   * The tag values determine sorting order, so be
   * careful if changes are needed.
   * Allocation records should sort ahead of tagging
   * records, which in turn sort ahead of deallocation records.
   */
  enum MemPointerTags {
    tag_alloc            = 0x0001, // malloc or reserve record
    tag_commit           = 0x0002, // commit record
    tag_type             = 0x0003, // tag virtual memory to a memory type
    tag_uncommit         = 0x0004, // uncommit record
    tag_release          = 0x0005, // free or release record
    tag_size             = 0x0006, // arena size
    tag_masks            = 0x0007, // all tag bits
    vmBit                = 0x0008  // virtual memory record bit
  };

  /* helper functions to interpret the tagging flags */

  inline static bool is_allocation_record(MEMFLAGS flags) {
    return (flags & tag_masks) == tag_alloc;
  }

  inline static bool is_deallocation_record(MEMFLAGS flags) {
    return (flags & tag_masks) == tag_release;
  }

  inline static bool is_arena_record(MEMFLAGS flags) {
    return (flags & (otArena | tag_size)) == otArena;
  }

  inline static bool is_arena_memory_record(MEMFLAGS flags) {
    return (flags & (otArena | tag_size)) == (otArena | tag_size);
  }

  inline static bool is_virtual_memory_record(MEMFLAGS flags) {
    return (flags & vmBit) != 0;
  }

  inline static bool is_virtual_memory_reserve_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_alloc | vmBit);
  }

  inline static bool is_virtual_memory_commit_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_commit | vmBit);
  }

  inline static bool is_virtual_memory_uncommit_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_uncommit | vmBit);
  }

  inline static bool is_virtual_memory_release_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_release | vmBit);
  }

  inline static bool is_virtual_memory_type_record(MEMFLAGS flags) {
    return (flags & 0x0F) == (tag_type | vmBit);
  }

  /* tagging flags */
  inline static MEMFLAGS malloc_tag()                 { return tag_alloc;   }
  inline static MEMFLAGS free_tag()                   { return tag_release; }
  inline static MEMFLAGS arena_size_tag()             { return tag_size | otArena; }
  inline static MEMFLAGS virtual_memory_tag()         { return vmBit; }
  inline static MEMFLAGS virtual_memory_reserve_tag() { return (tag_alloc | vmBit); }
  inline static MEMFLAGS virtual_memory_commit_tag()  { return (tag_commit | vmBit); }
  inline static MEMFLAGS virtual_memory_uncommit_tag(){ return (tag_uncommit | vmBit); }
  inline static MEMFLAGS virtual_memory_release_tag() { return (tag_release | vmBit); }
  inline static MEMFLAGS virtual_memory_type_tag()    { return (tag_type | vmBit); }
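
  // Worked example (not part of the original header): tags live in the low
  // bits of MEMFLAGS, so a virtual memory reserve record carries
  //   virtual_memory_reserve_tag() == (tag_alloc | vmBit) == 0x0009
  // and the helpers decode it by masking off the tag and vm bits:
  //   (0x0009 & 0x0F)      == (tag_alloc | vmBit)  -> is_virtual_memory_reserve_record
  //   (0x0009 & tag_masks) == tag_alloc            -> is_allocation_record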

 public:
  MemPointerRecord(): _flags(mtNone), _size(0) { }

  MemPointerRecord(address addr, MEMFLAGS memflags, size_t size = 0):
      MemPointer(addr), _flags(memflags), _size(size) { }

  MemPointerRecord(const MemPointerRecord& copy_from):
    MemPointer(copy_from), _flags(copy_from.flags()),
    _size(copy_from.size()) {
  }

  /* MemPointerRecord is not sequenced; it always returns
   * 0 to indicate a non-sequenced record.
   */
  virtual jint seq() const               { return 0; }

  inline size_t   size()  const          { return _size; }
  inline void set_size(size_t size)      { _size = size; }

  inline MEMFLAGS flags() const          { return _flags; }
  inline void set_flags(MEMFLAGS flags)  { _flags = flags; }

  MemPointerRecord& operator= (const MemPointerRecord& ptr) {
    MemPointer::operator=(ptr);
    _flags = ptr.flags();
#ifdef ASSERT
    if (IS_ARENA_OBJ(_flags)) {
      assert(!is_vm_pointer(), "wrong flags");
      assert((_flags & ot_masks) == otArena, "wrong flags");
    }
#endif
    _size = ptr.size();
    return *this;
  }

  // if the pointer represents a malloc'd memory address
  inline bool is_malloced_pointer() const {
    return !is_vm_pointer();
  }

  // if the pointer represents a virtual memory address
  inline bool is_vm_pointer() const {
    return is_virtual_memory_record(_flags);
  }

  // if this record records a 'malloc' or virtual memory
  // 'reserve' call
  inline bool is_allocation_record() const {
    return is_allocation_record(_flags);
  }

  // if this record carries the size information of an arena
  inline bool is_arena_memory_record() const {
    return is_arena_memory_record(_flags);
  }

  // if this pointer represents an address to an arena object
  inline bool is_arena_record() const {
    return is_arena_record(_flags);
  }

  // if this record carries the size information of a specific arena
  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
    assert(is_arena_memory_record(), "not size record");
    assert(arena_rc->is_arena_record(), "not arena record");
    return (arena_rc->addr() + sizeof(void*)) == addr();
  }
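
  // Illustrative sketch (not part of the original header): an arena's size
  // record is keyed by the arena object's address plus one pointer size, so
  // the two records pair up by address arithmetic alone:
  //
  //   // arena object record at address 0x1000       (flags contain otArena)
  //   // arena size record at 0x1000 + sizeof(void*) (flags otArena | tag_size)
  //   assert(size_rc->is_memory_record_of_arena(arena_rc), "paired by address");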

  // if this record records a 'free' or virtual memory 'release' call
  inline bool is_deallocation_record() const {
    return is_deallocation_record(_flags);
  }

  // if this record records a virtual memory 'commit' call
  inline bool is_commit_record() const {
    return is_virtual_memory_commit_record(_flags);
  }

  // if this record records a virtual memory 'uncommit' call
  inline bool is_uncommit_record() const {
    return is_virtual_memory_uncommit_record(_flags);
  }

  // if this record is a tagging record of a virtual memory block
  inline bool is_type_tagging_record() const {
    return is_virtual_memory_type_record(_flags);
  }

  // if the two memory pointer records actually represent the same
  // memory block
  inline bool is_same_region(const MemPointerRecord* other) const {
    return (addr() == other->addr() && size() == other->size());
  }

  // if this memory region fully contains another one
  inline bool contains_region(const MemPointerRecord* other) const {
    return contains_region(other->addr(), other->size());
  }

  // if this memory region fully contains the specified memory range
  inline bool contains_region(address add, size_t sz) const {
    return (addr() <= add && addr() + size() >= add + sz);
  }

  inline bool contains_address(address add) const {
    return (addr() <= add && addr() + size() > add);
  }

  // if this memory region overlaps another region
  inline bool overlaps_region(const MemPointerRecord* other) const {
    assert(other != NULL, "Just check");
    assert(size() > 0 && other->size() > 0, "empty range");
    return contains_address(other->addr()) ||
           contains_address(other->addr() + other->size() - 1) || // end address is exclusive
           other->contains_address(addr()) ||
           other->contains_address(addr() + size() - 1); // end address is exclusive
  }

};
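
// Worked example (not part of the original header): the region predicates
// above are plain interval arithmetic. For a record covering
// [0x1000, 0x1000 + 0x100):
//
//   contains_address((address)0x1000)          -> true   (start is inclusive)
//   contains_address((address)0x1100)          -> false  (end is exclusive)
//   contains_region((address)0x1080, 0x80)     -> true   (ends exactly at 0x1100)
//   overlaps_region of [0x10F0, 0x10F0 + 0x20) -> true   (last byte 0x10FF is inside)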

// MemPointerRecordEx also records the callsite pc from which
// the memory block was allocated
class MemPointerRecordEx : public MemPointerRecord {
 private:
  address      _pc;  // callsite pc

 public:
  MemPointerRecordEx(): _pc(0) { }

  MemPointerRecordEx(address addr, MEMFLAGS memflags, size_t size = 0, address pc = 0):
    MemPointerRecord(addr, memflags, size), _pc(pc) {}

  MemPointerRecordEx(const MemPointerRecordEx& copy_from):
    MemPointerRecord(copy_from), _pc(copy_from.pc()) {}

  inline address pc() const { return _pc; }

  void init(const MemPointerRecordEx* mpe) {
    MemPointerRecord::operator=(*mpe);
    _pc = mpe->pc();
  }

  void init(const MemPointerRecord* mp) {
    MemPointerRecord::operator=(*mp);
    _pc = 0;
  }
};

// A virtual memory region. The region can represent a reserved
// virtual memory region or a committed memory region.
class VMMemRegion : public MemPointerRecord {
 public:
  VMMemRegion() { }

  void init(const MemPointerRecord* mp) {
    assert(mp->is_vm_pointer(), "Sanity check");
    _addr = mp->addr();
    set_size(mp->size());
    set_flags(mp->flags());
  }

  VMMemRegion& operator=(const VMMemRegion& other) {
    MemPointerRecord::operator=(other);
    return *this;
  }

  inline bool is_reserved_region() const {
    return is_allocation_record();
  }

  inline bool is_committed_region() const {
    return is_commit_record();
  }

  /* base address of this virtual memory range */
  inline address base() const {
    return addr();
  }

  /* tag this virtual memory range with the specified memory type */
  inline void tag(MEMFLAGS f) {
    set_flags(flags() | (f & mt_masks));
  }

  // expand this region to also cover the specified range.
  // The range has to adjoin either end of this memory region.
  void expand_region(address addr, size_t sz) {
    if (addr < base()) {
      assert(addr + sz == base(), "Sanity check");
      _addr = addr;
      set_size(size() + sz);
    } else {
      assert(base() + size() == addr, "Sanity check");
      set_size(size() + sz);
    }
  }

  // exclude the specified address range from this region.
  // The excluded memory range has to be on either end of this memory
  // region.
  inline void exclude_region(address add, size_t sz) {
    assert(is_reserved_region() || is_committed_region(), "Sanity check");
    assert(addr() != NULL && size() != 0, "Sanity check");
    assert(add >= addr() && add < addr() + size(), "Sanity check");
    assert(add == addr() || (add + sz) == (addr() + size()),
      "exclude in the middle");
    if (add == addr()) {
      set_addr(add + sz);
      set_size(size() - sz);
    } else {
      set_size(size() - sz);
    }
  }
};
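
// Worked example (not part of the original header): expand_region() and
// exclude_region() only operate on the ends of a region. Starting from
// [0x1000, 0x3000):
//
//   expand_region((address)0x3000, 0x1000)   -> [0x1000, 0x4000)  (grow at the top)
//   exclude_region((address)0x1000, 0x800)   -> [0x1800, 0x4000)  (trim the bottom)
//   exclude_region((address)0x3800, 0x800)   -> [0x1800, 0x3800)  (trim the top)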

// VMMemRegionEx also records the callsite pc of the virtual
// memory operation.
class VMMemRegionEx : public VMMemRegion {
 public:
  VMMemRegionEx(): _pc(0) { }

  void init(const MemPointerRecordEx* mpe) {
    VMMemRegion::init(mpe);
    _pc = mpe->pc();
  }

  void init(const MemPointerRecord* mp) {
    VMMemRegion::init(mp);
    _pc = 0;
  }

  VMMemRegionEx& operator=(const VMMemRegionEx& other) {
    VMMemRegion::operator=(other);
    _pc = other.pc();
    return *this;
  }

  inline address pc() const { return _pc; }

 private:
  address   _pc;  // callsite pc
};

/*
 * Sequenced memory record
 */
class SeqMemPointerRecord : public MemPointerRecord {
 private:
  jint _seq;  // sequence number

 public:
  SeqMemPointerRecord(): _seq(0) { }

  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
    : MemPointerRecord(addr, flags, size), _seq(seq) {
  }

  SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
    : MemPointerRecord(copy_from) {
    _seq = copy_from.seq();
  }

  SeqMemPointerRecord& operator= (const SeqMemPointerRecord& ptr) {
    MemPointerRecord::operator=(ptr);
    _seq = ptr.seq();
    return *this;
  }

  inline jint seq() const {
    return _seq;
  }
};

/*
 * Sequenced memory record with callsite pc
 */
class SeqMemPointerRecordEx : public MemPointerRecordEx {
 private:
  jint    _seq;  // sequence number

 public:
  SeqMemPointerRecordEx(): _seq(0) { }

  SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
    jint seq, address pc):
    MemPointerRecordEx(addr, flags, size, pc), _seq(seq) {
  }

  SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
    : MemPointerRecordEx(copy_from) {
    _seq = copy_from.seq();
  }

  SeqMemPointerRecordEx& operator= (const SeqMemPointerRecordEx& ptr) {
    MemPointerRecordEx::operator=(ptr);
    _seq = ptr.seq();
    return *this;
  }

  inline jint seq() const {
    return _seq;
  }
};
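
// Illustrative sketch (not part of the original header): per the hierarchy
// comment at the top of this file, a per-thread recorder would stamp each
// event with a fresh sequence number and, when available, the callsite pc
// ('caller_pc' here is hypothetical):
//
//   jint seq = SequenceGenerator::next();
//   SeqMemPointerRecordEx rec(addr, MemPointerRecord::malloc_tag(),
//                             size, seq, caller_pc);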

#endif // SHARE_VM_SERVICES_MEM_PTR_HPP