/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "memory/metaspace.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = NULL;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}
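// Illustrative example: with committed regions [0x1000, 0x2000) and
// [0x3000, 0x4000) in the list, find_preceding_node_from(head, 0x2800)
// returns the node for [0x1000, 0x2000) - the last region whose end is at
// or below 0x2800 - since the loop breaks at the first region ending past addr.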
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == NULL) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());

  if (next != NULL) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
      next = (prev != NULL ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}
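// Illustrative example: committing [0x1000, 0x2000) and then [0x2000, 0x3000)
// from the same call stack yields a single committed region [0x1000, 0x3000);
// the second request is adjacent to the first and merges via try_merge_with().
// With a different call stack the regions stay separate, so each keeps its
// own allocation site.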
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr || rgn->end() == addr + size) {
    // The uncommitted range is flush with one end; just shrink this region.
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    // top of the whole region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }

  return false;
}

bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  CommittedMemoryRegion* crgn;

  while (head != NULL) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // head already advanced; don't advance prev
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {
      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }
    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}
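// Illustrative example: with a single committed region [0x1000, 0x4000),
// remove_uncommitted_region(0x2000, 0x1000) hits subcase (1) above and
// delegates to the node-based overload, which splits the region into
// [0x1000, 0x2000) and a newly added node [0x3000, 0x4000).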
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }

    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}

size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();

  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }

  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}
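// Note (illustrative): a region is typically reserved before its type is
// known and starts out as mtNone; set_flag() then retags it (e.g. to
// mtThreadStack via set_reserved_region_type() below) and moves both the
// reserved and committed byte counts to the new flag, keeping the summary
// totals consistent.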
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != NULL;
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that exits without detaching from the VM leaks its JavaThread
      // object, and with it the stale stack region.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, 1);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  return result;
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr || reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
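// Illustrative example: releasing [0x2000, 0x3000) from a reserved region
// [0x1000, 0x4000) first uncommits anything tracked inside the released
// range, keeps [0x1000, 0x2000) in the original node, adds a new node for
// [0x3000, 0x4000), and moves the committed regions remaining above the
// released range into the new node via move_committed_regions().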
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for potential race with other thread calling transition
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}

// Metaspace Support
MetaspaceSnapshot::MetaspaceSnapshot() {
  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index++) {
    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
    assert_valid_metadata_type(type);
    _reserved_in_bytes[type]  = 0;
    _committed_in_bytes[type] = 0;
    _used_in_bytes[type]      = 0;
    _free_in_bytes[type]      = 0;
  }
}

void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
  assert_valid_metadata_type(type);

  mss._reserved_in_bytes[type]  = MetaspaceUtils::reserved_bytes(type);
  mss._committed_in_bytes[type] = MetaspaceUtils::committed_bytes(type);
  mss._used_in_bytes[type]      = MetaspaceUtils::used_bytes(type);

  size_t free_in_bytes = (MetaspaceUtils::capacity_bytes(type) - MetaspaceUtils::used_bytes(type))
                       + MetaspaceUtils::free_chunks_total_bytes(type)
                       + MetaspaceUtils::free_in_vs_bytes(type);
  mss._free_in_bytes[type] = free_in_bytes;
}

void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
  snapshot(Metaspace::ClassType, mss);
  if (Metaspace::using_class_space()) {
    snapshot(Metaspace::NonClassType, mss);
  }
}
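// Illustrative usage sketch for VirtualMemoryTracker::walk_virtual_memory()
// above (an illustration, not part of the original source): a caller
// subclasses VirtualMemoryWalker and returns true from do_allocation_site()
// to keep the walk going.
//
//   class RegionPrinter : public VirtualMemoryWalker {
//    public:
//     virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       tty->print_cr("Reserved " SIZE_FORMAT " bytes at " PTR_FORMAT,
//                     rgn->size(), p2i(rgn->base()));
//       return true;   // returning false stops the walk
//     }
//   };
//
//   RegionPrinter printer;
//   VirtualMemoryTracker::walk_virtual_memory(&printer);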