src/hotspot/share/services/virtualMemoryTracker.cpp

 * questions.
 */
#include "precompiled.hpp"

#include "memory/metaspace.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize the static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

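// The placement new above is presumably there to avoid a static constructor:
// the snapshot lives in a raw, loader-zeroed size_t array and is constructed
// explicitly once NMT initializes. A minimal sketch of the same idiom, using
// a hypothetical type Foo:
//
//   static size_t _buf[CALC_OBJ_SIZE_IN_TYPE(Foo, size_t)];
//   ::new ((void*)_buf) Foo();    // construct in place; no heap, never destructed
//   Foo* foo = reinterpret_cast<Foo*>(_buf);
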
void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks; their committed sizes are computed
  // lazily, at snapshot time.
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {


size_t ReservedMemoryRegion::committed_size() const {
  // Sum the sizes of all committed regions inside this reserved region.
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    // Re-attribute this region's reserved and committed memory to the new type.
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != NULL) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = committed_top;
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}

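// Illustration (not part of the original source): layout of a downward-growing
// thread stack inside its reserved region, addresses increasing to the right:
//
//   base()                                                  base() + size()
//   | committed guard pages | uncommitted gap | committed, in-use stack |
//                           ^
//                           thread_stack_uncommitted_bottom()
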
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    // C-heap allocate with std::nothrow: on failure, report it to the
    // caller instead of aborting the VM.
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");


      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // Use the original region for the lower part.
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

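// Illustration (not part of the original source): releasing [addr, addr + size)
// from the middle of an existing reserved region:
//
//   before:  [base ........................................ top)
//   after:   [base ...... addr)         [addr + size ...... top)
//            reserved_rgn, trimmed      high_rgn, newly added
//
// Committed sub-regions above the split point are handed to the new high
// region by move_committed_regions().
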
// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      size_t committed_size = os::committed_stack_size(stack_bottom, stack_size);
      if (committed_size > 0) {
        // The walker interface is const; update the region in place.
        ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
        NativeCallStack ncs; // empty stack

        // The stack grows downward, so the committed, in-use part sits at
        // the high end of the reserved region.
        region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs);
      }
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

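// Other walkers follow the same shape: subclass VirtualMemoryWalker, override
// do_allocation_site(), and return false to stop the walk early. A hypothetical
// (not in this file) walker that sums committed memory for one flag:
//
//   class CommittedSumWalker : public VirtualMemoryWalker {
//     MEMFLAGS _flag;
//     size_t   _sum;
//   public:
//     CommittedSumWalker(MEMFLAGS f) : _flag(f), _sum(0) { }
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       if (rgn->flag() == _flag) {
//         _sum += rgn->committed_size();
//       }
//       return true;    // keep walking
//     }
//     size_t sum() const { return _sum; }
//   };
//
//   CommittedSumWalker walker(mtThreadStack);
//   VirtualMemoryTracker::walk_virtual_memory(&walker);
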
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {

