
src/hotspot/share/services/virtualMemoryTracker.cpp

 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}
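
The placement-new call above constructs the snapshot object inside a statically
allocated size_t array, so summary accounting works before the C heap is usable.
A minimal standalone sketch of the same idiom (Snapshot, storage and
init_snapshot are illustrative names, not part of this file):

    #include <new>      // placement operator new
    #include <cstddef>

    struct Snapshot {                  // stand-in for VirtualMemorySnapshot
      size_t reserved;
      size_t committed;
      Snapshot() : reserved(0), committed(0) {}
    };

    // Static data area sized in size_t units, mirroring _snapshot above.
    static size_t storage[(sizeof(Snapshot) + sizeof(size_t) - 1) / sizeof(size_t)];

    static Snapshot* init_snapshot() {
      // Construct in place: no heap allocation, and no destructor is ever run.
      return ::new (static_cast<void*>(storage)) Snapshot();
    }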

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}
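
The comment above draws the key distinction: equals(...) also matches merely
overlapping regions, while same_region(...) requires identical bounds. A
standalone sketch of the three predicates over half-open [base, base + size)
ranges (hypothetical helpers, not the CommittedMemoryRegion API):

    #include <cstddef>
    typedef unsigned char* address;

    // Identical bounds only.
    static bool same_region(address b1, size_t s1, address b2, size_t s2) {
      return b1 == b2 && s1 == s2;
    }

    // True when the two half-open ranges share at least one byte, which is
    // why an overlap test cannot be used to check for an exact match.
    static bool overlaps(address b1, size_t s1, address b2, size_t s2) {
      return b1 < b2 + s2 && b2 < b1 + s1;
    }

    // Adjacent: one range ends exactly where the other begins, so the two
    // can be merged when their call stacks also match.
    static bool adjacent_to(address b1, size_t s1, address b2, size_t s2) {
      return b1 + s1 == b2 || b2 + s2 == b1;
    }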

static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {

// ... (intervening code elided) ...

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}
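
set_flag keeps the per-type summary consistent: when a region is re-tagged, both
its reserved and its committed byte counts move from the old MEMFLAGS bucket to
the new one, leaving the overall totals unchanged. A hedged sketch of that move
pattern over a plain array of counters (Tag, Counter and move_memory are
illustrative, not HotSpot types):

    #include <cstddef>

    enum Tag { TAG_NONE, TAG_THREAD_STACK, TAG_NMT, TAG_COUNT };

    struct Counter { size_t reserved; size_t committed; };
    static Counter counters[TAG_COUNT];

    // Re-attribute amounts from one tag to another; the sums over all
    // tags stay the same, only the attribution changes.
    static void move_memory(Tag from, Tag to, size_t reserved, size_t committed) {
      counters[from].reserved  -= reserved;
      counters[from].committed -= committed;
      counters[to].reserved    += reserved;
      counters[to].committed   += committed;
    }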

address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != NULL) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = committed_top;
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}
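
On a downward-growing stack the committed guard pages sit at the low end of the
reservation, so the loop above advances bottom past every committed run that
stops short of the region's top; everything at or above the returned bottom is
then probed page by page by the snapshot walker below. A standalone sketch of
the same scan over a sorted array of committed ranges (Range and
uncommitted_bottom are illustrative):

    #include <cstddef>
    typedef unsigned char* address;

    struct Range { address base; size_t size; };

    // ranges[] is sorted by base and lies entirely inside [base, base + size).
    static address uncommitted_bottom(address base, size_t size,
                                      const Range* ranges, int n) {
      address bottom = base;
      address top    = base + size;
      for (int i = 0; i < n; i++) {
        address committed_top = ranges[i].base + ranges[i].size;
        if (committed_top < top) {
          bottom = committed_top;   // skip committed guard pages low in the stack
        } else {
          break;                    // reached the committed top of the stack
        }
      }
      return bottom;
    }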

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

// ... (intervening code elided) ...

      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
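
In the branch above, releasing [addr, addr + size) from the middle of a reserved
region leaves a low piece [base, addr) and a high piece [addr + size, end): the
existing node is trimmed down to the low piece, a new node is added for the high
piece, and committed sub-regions above addr are moved to it. A sketch of the
interval arithmetic alone (Region and punch_hole are illustrative helpers):

    #include <cstddef>
    typedef unsigned char* address;

    struct Region { address base; size_t size; };

    // Remove [addr, addr + size) from the middle of r. r keeps the low piece
    // below the hole; the returned Region is the high piece above it.
    static Region punch_hole(Region& r, address addr, size_t size) {
      address top = r.base + r.size;
      Region high = { addr + size, (size_t)(top - (addr + size)) };
      r.size = (size_t)(addr - r.base);   // low piece [r.base, addr)
      return high;
    }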

// Iterate over the given range and locate the committed regions within it.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t  _size;

  address _current_start;
  size_t  _current_size;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start), _current_size(size) {
  }

  // Returns true if a committed region is found.
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  assert(_current_start + _current_size == end(), "Must be");
  if (os::committed_in_range(_current_start, _current_size, committed_start, committed_size)) {
    assert(committed_start != NULL, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    size_t remaining_size = (_current_start + _current_size) - (committed_start + committed_size);
    _current_start = committed_start + committed_size;
    _current_size = remaining_size;
    return true;
  } else {
    return false;
  }
}
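
next_committed turns os::committed_in_range into a cursor: each call reports the
next committed run inside the remaining window and then advances the window to
just past that run. A standalone sketch of the same pattern with the OS query
replaced by a hypothetical page-state probe callback:

    #include <cstddef>
    typedef unsigned char* address;

    // Hypothetical probe: finds the next committed run inside [from, from + len),
    // returning false when none remains. Stands in for os::committed_in_range.
    typedef bool (*probe_fn)(address from, size_t len,
                             address& run_start, size_t& run_size);

    struct Cursor {
      address current;    // start of the unexamined window
      size_t  remaining;  // bytes left to examine
    };

    static bool next_committed(Cursor& c, probe_fn probe,
                               address& start, size_t& size) {
      if (c.remaining == 0) return false;
      if (!probe(c.current, c.remaining, start, size)) return false;
      // Advance the cursor just past the run that was reported.
      address run_end = start + size;
      c.remaining = (size_t)((c.current + c.remaining) - run_end);
      c.current   = run_end;
      return true;
    }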

// Walk all known thread stacks and snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t  committed_size;
      size_t  stack_size = rgn->base() + rgn->size() - stack_bottom;

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != NULL, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that _reserved_regions has not been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}
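
walk_virtual_memory is the generic traversal hook: under ThreadCritical, every
reserved region is handed to the walker's do_allocation_site, and returning
false aborts the walk early. A hedged sketch of the visitor shape with a walker
that tallies committed bytes for one memory type (Region, Walker and
CommittedByTypeWalker are illustrative stand-ins, not HotSpot classes):

    #include <cstddef>

    struct Region { int flag; size_t committed; };

    // Mirrors the VirtualMemoryWalker::do_allocation_site contract.
    struct Walker {
      virtual ~Walker() {}
      virtual bool do_region(const Region* rgn) = 0;
    };

    struct CommittedByTypeWalker : public Walker {
      int    _flag;
      size_t _total;
      explicit CommittedByTypeWalker(int flag) : _flag(flag), _total(0) {}
      virtual bool do_region(const Region* rgn) {
        if (rgn->flag == _flag) _total += rgn->committed;
        return true;   // returning false would stop the traversal early
      }
    };

    static bool walk(const Region* regions, int n, Walker* w) {
      for (int i = 0; i < n; i++) {
        if (!w->do_region(&regions[i])) return false;
      }
      return true;
    }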

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  // ... (body elided) ...