< prev index next >
src/hotspot/share/services/virtualMemoryTracker.cpp
Print this page
@@ -36,10 +36,16 @@
assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
// Use placement operator new to initialize static data area.
::new ((void*)_snapshot) VirtualMemorySnapshot();
}
// Copies the current summary-level virtual memory counters into *s.
// Thread stacks are committed/uncommitted by the OS behind NMT's back,
// so their committed sizes are re-sampled first to keep the snapshot
// accurate.
void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Snapshot current thread stacks
  VirtualMemoryTracker::snapshot_thread_stacks();
  as_snapshot()->copy_to(s);
}
+
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
// Three-way comparator for committed regions; delegates to
// CommittedMemoryRegion::compare().
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}
@@ -75,24 +81,31 @@
return true;
}
}
if (rgn->overlap_region(addr, size)) {
- // Clear a space for this region in the case it overlaps with any regions.
- remove_uncommitted_region(addr, size);
- break; // commit below
+ if (addr < rgn->base()) {
+ rgn->expand_region(addr, (rgn->base() - addr));
+ }
+
+ if (addr + size > rgn->base() + rgn->size()) {
+ rgn->expand_region(rgn->base() + rgn->size(),
+ (addr + size) - (rgn->base() + rgn->size()));
}
+ return true;
+ }
+
if (rgn->end() >= addr + size){
break;
}
node = node->next();
}
// New committed region
VirtualMemorySummary::record_committed_memory(size, flag());
return add_committed_region(committed_rgn);
- }
+}
void ReservedMemoryRegion::set_all_committed(bool b) {
if (all_committed() != b) {
_all_committed = b;
if (b) {
@@ -277,10 +290,29 @@
VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
_flag = f;
}
}
+address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
+ assert(flag() == mtThreadStack, "Only for thread stack");
+ LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
+ address bottom = base();
+ address top = base() + size();
+ while (head != NULL) {
+ address committed_top = head->data()->base() + head->data()->size();
+ if (committed_top < top) {
+ // committed stack guard pages, skip them
+ bottom = head->data()->base() + head->data()->size();
+ head = head->next();
+ } else {
+ break;
+ }
+ }
+
+ return bottom;
+}
+
// Sets up virtual memory tracking for the requested NMT level. The
// summary snapshot area is only needed at NMT_summary and above.
// Always reports success.
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level < NMT_summary) {
    return true;
  }
  VirtualMemorySummary::initialize();
  return true;
}
@@ -457,10 +489,37 @@
}
}
}
}
// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  // Invoked for every tracked reserved region; only mtThreadStack
  // regions are sampled. Always returns true so the walk continues
  // over all regions.
  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      // Only ask the OS about the part of the stack NMT does not already
      // record as committed (i.e. above the known guard pages).
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;

      size_t committed_size = os::committed_stack_size(stack_bottom, stack_size);
      if (committed_size > 0) {
        // The walker interface hands out const regions, but we need to
        // record the sampled committed range, hence the cast.
        ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
        NativeCallStack ncs; // empty stack

        // Stack grows downward
        // NOTE(review): assumes os::committed_stack_size() reports one
        // contiguous committed range anchored at the top of the stack —
        // confirm this holds on every platform implementation.
        region->add_committed_region(rgn->base() + rgn->size() - committed_size, committed_size, ncs);
      }
    }
    return true;
  }
};
+
// Re-samples the committed size of every tracked thread stack region
// from the OS, so summary reports reflect the actual stack commit
// charge rather than only what NMT recorded at registration time.
void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
assert(_reserved_regions != NULL, "Sanity check");
ThreadCritical tc;
// Check that the _reserved_regions haven't been deleted.
< prev index next >