< prev index next >

src/hotspot/share/services/virtualMemoryTracker.cpp

Print this page




  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 
  26 #include "memory/metaspace.hpp"
  27 #include "runtime/atomic.hpp"
  28 #include "runtime/os.hpp"
  29 #include "runtime/threadCritical.hpp"
  30 #include "services/memTracker.hpp"
  31 #include "services/virtualMemoryTracker.hpp"
  32 
  33 size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
  34 
  35 void VirtualMemorySummary::initialize() {
  36   assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  37   // Use placement operator new to initialize static data area.
  38   ::new ((void*)_snapshot) VirtualMemorySnapshot();
  39 }
  40 






  41 SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
  42 
  43 int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  44   return r1.compare(r2);
  45 }
  46 
  47 int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  48   return r1.compare(r2);
  49 }
  50 
// Record that [addr, addr + size) inside this reserved region has been
// committed, attributed to 'stack'.  Merges into an adjacent trailing
// region recorded under the same call stack when possible; otherwise
// inserts a new committed region.  Returns false only if insertion into
// _committed_regions fails.
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // A fully committed reservation keeps no per-region bookkeeping.
  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();

  while (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      // Exact duplicate; nothing new to record.
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // special case to expand prior region if there is no next region
      LinkedListNode<CommittedMemoryRegion>* next = node->next();
      if (next == NULL && rgn->call_stack()->equals(stack)) {
        // Re-record around the expansion (uncommit old size, commit new
        // size) so the summary counts the merged region exactly once.
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // the two adjacent regions have the same call stack, merge them
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
    }

    if (rgn->overlap_region(addr, size)) {
      // Clear a space for this region in the case it overlaps with any regions.
      remove_uncommitted_region(addr, size);
      break;  // commit below
    }

    if (rgn->end() >= addr + size){
      // List is address-sorted; no later region can be relevant.
      break;
    }
    node = node->next();
  }

  // New committed region
  VirtualMemorySummary::record_committed_memory(size, flag());
  return add_committed_region(committed_rgn);
}
  94 
  95 void ReservedMemoryRegion::set_all_committed(bool b) {
  96   if (all_committed() != b) {
  97     _all_committed = b;
  98     if (b) {
  99       VirtualMemorySummary::record_committed_memory(size(), flag());
 100     }
 101   }
 102 }
 103 
 104 bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
 105   address addr, size_t size) {
 106   assert(addr != NULL, "Invalid address");
 107   assert(size > 0, "Invalid size");
 108 
 109   CommittedMemoryRegion* rgn = node->data();
 110   assert(rgn->contain_region(addr, size), "Has to be contained");
 111   assert(!rgn->same_region(addr, size), "Can not be the same region");
 112 
 113   if (rgn->base() == addr ||


 262     size_t committed = 0;
 263     LinkedListNode<CommittedMemoryRegion>* head =
 264       _committed_regions.head();
 265     while (head != NULL) {
 266       committed += head->data()->size();
 267       head = head->next();
 268     }
 269     return committed;
 270   }
 271 }
 272 
 273 void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
 274   assert((flag() == mtNone || flag() == f), "Overwrite memory type");
 275   if (flag() != f) {
 276     VirtualMemorySummary::move_reserved_memory(flag(), f, size());
 277     VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
 278     _flag = f;
 279   }
 280 }
 281 



















 282 bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
 283   if (level >= NMT_summary) {
 284     VirtualMemorySummary::initialize();
 285   }
 286   return true;
 287 }
 288 
 289 bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
 290   if (level >= NMT_summary) {
 291     _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
 292       SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
 293     return (_reserved_regions != NULL);
 294   }
 295   return true;
 296 }
 297 
 298 bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
 299    const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
 300   assert(base_addr != NULL, "Invalid address");
 301   assert(size > 0, "Invalid size");


 442       return true;
 443     } else {
 444       address top = reserved_rgn->end();
 445       address high_base = addr + size;
 446       ReservedMemoryRegion high_rgn(high_base, top - high_base,
 447         *reserved_rgn->call_stack(), reserved_rgn->flag());
 448 
 449       // use original region for lower region
 450       reserved_rgn->exclude_region(addr, top - addr);
 451       LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
 452       if (new_rgn == NULL) {
 453         return false;
 454       } else {
 455         reserved_rgn->move_committed_regions(addr, *new_rgn->data());
 456         return true;
 457       }
 458     }
 459   }
 460 }
 461 



























 462 
 463 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
 464   assert(_reserved_regions != NULL, "Sanity check");
 465   ThreadCritical tc;
 466   // Check that the _reserved_regions haven't been deleted.
 467   if (_reserved_regions != NULL) {
 468     LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
 469     while (head != NULL) {
 470       const ReservedMemoryRegion* rgn = head->peek();
 471       if (!walker->do_allocation_site(rgn)) {
 472         return false;
 473       }
 474       head = head->next();
 475     }
 476    }
 477   return true;
 478 }
 479 
 480 // Transition virtual memory tracking level.
 481 bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {




  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 
  26 #include "memory/metaspace.hpp"
  27 #include "runtime/atomic.hpp"
  28 #include "runtime/os.hpp"
  29 #include "runtime/threadCritical.hpp"
  30 #include "services/memTracker.hpp"
  31 #include "services/virtualMemoryTracker.hpp"
  32 
  33 size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];
  34 
  35 void VirtualMemorySummary::initialize() {
  36   assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  37   // Use placement operator new to initialize static data area.
  38   ::new ((void*)_snapshot) VirtualMemorySnapshot();
  39 }
  40 
  41 void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  42   // Snapshot current thread stacks
  43   VirtualMemoryTracker::snapshot_thread_stacks();
  44   as_snapshot()->copy_to(s);
  45 }
  46 
  47 SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
  48 
  49 int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  50   return r1.compare(r2);
  51 }
  52 
  53 int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  54   return r1.compare(r2);
  55 }
  56 
// Record that [addr, addr + size) inside this reserved region has been
// committed, attributed to 'stack'.  Merges into an adjacent trailing
// region with the same call stack, or grows an overlapping region to
// cover the union; otherwise inserts a new committed region.  Returns
// false only if insertion into _committed_regions fails.
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // A fully committed reservation keeps no per-region bookkeeping.
  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();

  while (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      // Exact duplicate; nothing new to record.
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // special case to expand prior region if there is no next region
      LinkedListNode<CommittedMemoryRegion>* next = node->next();
      if (next == NULL && rgn->call_stack()->equals(stack)) {
        // Re-record around the expansion (uncommit old size, commit new
        // size) so the summary counts the merged region exactly once.
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // the two adjacent regions have the same call stack, merge them
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
    }

    if (rgn->overlap_region(addr, size)) {
      // Grow the existing region to cover the union of both ranges:
      // first extend downward below its base, then upward past its end.
      if (addr < rgn->base()) {
        rgn->expand_region(addr, (rgn->base() - addr));
      }

      if (addr + size > rgn->base() + rgn->size()) {
        rgn->expand_region(rgn->base() + rgn->size(),
                           (addr + size) - (rgn->base() + rgn->size()));
      }
      // NOTE(review): unlike the merge path above, this branch records no
      // VirtualMemorySummary::record_committed_memory() delta for the
      // newly covered bytes -- confirm expand_region() or a caller
      // accounts for them.
      return true;
    }

    if (rgn->end() >= addr + size){
      // List is address-sorted; no later region can be relevant.
      break;
    }
    node = node->next();
  }

  // New committed region
  VirtualMemorySummary::record_committed_memory(size, flag());
  return add_committed_region(committed_rgn);
}
 107 
 108 void ReservedMemoryRegion::set_all_committed(bool b) {
 109   if (all_committed() != b) {
 110     _all_committed = b;
 111     if (b) {
 112       VirtualMemorySummary::record_committed_memory(size(), flag());
 113     }
 114   }
 115 }
 116 
 117 bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
 118   address addr, size_t size) {
 119   assert(addr != NULL, "Invalid address");
 120   assert(size > 0, "Invalid size");
 121 
 122   CommittedMemoryRegion* rgn = node->data();
 123   assert(rgn->contain_region(addr, size), "Has to be contained");
 124   assert(!rgn->same_region(addr, size), "Can not be the same region");
 125 
 126   if (rgn->base() == addr ||


 275     size_t committed = 0;
 276     LinkedListNode<CommittedMemoryRegion>* head =
 277       _committed_regions.head();
 278     while (head != NULL) {
 279       committed += head->data()->size();
 280       head = head->next();
 281     }
 282     return committed;
 283   }
 284 }
 285 
 286 void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
 287   assert((flag() == mtNone || flag() == f), "Overwrite memory type");
 288   if (flag() != f) {
 289     VirtualMemorySummary::move_reserved_memory(flag(), f, size());
 290     VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
 291     _flag = f;
 292   }
 293 }
 294 
 295 address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
 296   assert(flag() == mtThreadStack, "Only for thread stack");
 297   LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
 298   address bottom = base();
 299   address top = base() + size();
 300   while (head != NULL) {
 301     address committed_top = head->data()->base() + head->data()->size();
 302     if (committed_top < top) {
 303       // committed stack guard pages, skip them
 304       bottom = head->data()->base() + head->data()->size();
 305       head = head->next();
 306     } else {
 307       break;
 308     }
 309   }
 310 
 311   return bottom;  
 312 }
 313 
 314 bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
 315   if (level >= NMT_summary) {
 316     VirtualMemorySummary::initialize();
 317   }
 318   return true;
 319 }
 320 
 321 bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
 322   if (level >= NMT_summary) {
 323     _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
 324       SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
 325     return (_reserved_regions != NULL);
 326   }
 327   return true;
 328 }
 329 
 330 bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
 331    const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
 332   assert(base_addr != NULL, "Invalid address");
 333   assert(size > 0, "Invalid size");


 474       return true;
 475     } else {
 476       address top = reserved_rgn->end();
 477       address high_base = addr + size;
 478       ReservedMemoryRegion high_rgn(high_base, top - high_base,
 479         *reserved_rgn->call_stack(), reserved_rgn->flag());
 480 
 481       // use original region for lower region
 482       reserved_rgn->exclude_region(addr, top - addr);
 483       LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
 484       if (new_rgn == NULL) {
 485         return false;
 486       } else {
 487         reserved_rgn->move_committed_regions(addr, *new_rgn->data());
 488         return true;
 489       }
 490     }
 491   }
 492 }
 493 
 494 // Walk all known thread stacks, snapshot their committed ranges.
 495 class SnapshotThreadStackWalker : public VirtualMemoryWalker {
 496 public:
 497   SnapshotThreadStackWalker() {}
 498 
 499   bool do_allocation_site(const ReservedMemoryRegion* rgn) {
 500     if (rgn->flag() == mtThreadStack) {
 501       address stack_bottom = rgn->thread_stack_uncommitted_bottom();
 502       size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
 503 
 504       size_t committed_size = os::committed_stack_size(stack_bottom, stack_size);
 505       if (committed_size > 0) {
 506         ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
 507         NativeCallStack ncs; // empty stack
 508 
 509         // Stack grows downward
 510         region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs);
 511       }
 512     }
 513     return true;
 514   }
 515 };
 516 
 517 void VirtualMemoryTracker::snapshot_thread_stacks() {
 518   SnapshotThreadStackWalker walker;
 519   walk_virtual_memory(&walker);
 520 }
 521 
 522 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
 523   assert(_reserved_regions != NULL, "Sanity check");
 524   ThreadCritical tc;
 525   // Check that the _reserved_regions haven't been deleted.
 526   if (_reserved_regions != NULL) {
 527     LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
 528     while (head != NULL) {
 529       const ReservedMemoryRegion* rgn = head->peek();
 530       if (!walker->do_allocation_site(rgn)) {
 531         return false;
 532       }
 533       head = head->next();
 534     }
 535    }
 536   return true;
 537 }
 538 
 539 // Transition virtual memory tracking level.
 540 bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {


< prev index next >