# HG changeset patch
# User stuefe
# Date 1590219516 -7200
#      Sat May 23 09:38:36 2020 +0200
# Node ID 494406c5e45c2099a6f4a84627ba44ad5c29aff3
# Parent  50029a35de431bad65a867b12b10b07254737649
8243535: NMT does not handle os::split_reserved_space()

diff -r 50029a35de43 -r 494406c5e45c src/hotspot/os/posix/os_posix.cpp
--- a/src/hotspot/os/posix/os_posix.cpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/os/posix/os_posix.cpp	Sat May 23 09:38:36 2020 +0200
@@ -370,6 +370,10 @@
   assert(split > 0, "Sanity");
   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
+
+  // NMT: tell NMT to track both parts individually from now on.
+  MemTracker::record_virtual_memory_split_reserved(base, size, split);
+
 }
 
 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/os/windows/os_windows.cpp
--- a/src/hotspot/os/windows/os_windows.cpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/os/windows/os_windows.cpp	Sat May 23 09:38:36 2020 +0200
@@ -3237,6 +3237,10 @@
   reserve_memory(split, base);
   reserve_memory(size - split, split_address);
 
+  // NMT: nothing to do here. Since Windows implements the split by
+  // releasing and re-reserving memory, the parts are already registered
+  // as individual mappings with NMT.
+
 }
 
 // Multiple threads can race in this code but it's not possible to unmap small sections of
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/share/memory/filemap.cpp
--- a/src/hotspot/share/memory/filemap.cpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/share/memory/filemap.cpp	Sat May 23 09:38:36 2020 +0200
@@ -1499,11 +1499,6 @@
     si->set_read_only(false); // Need to patch the pointers
   }
 
-  if (rs.is_reserved()) {
-    assert(rs.contains(requested_addr) && rs.contains(requested_addr + size - 1), "must be");
-    MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared);
-  }
-
   if (MetaspaceShared::use_windows_memory_mapping() && rs.is_reserved()) {
     // This is the second time we try to map the archive(s). We have already created a ReservedSpace
     // that covers all the FileMapRegions to ensure all regions can be mapped. However, Windows
@@ -1515,9 +1510,12 @@
       return MAP_ARCHIVE_OTHER_FAILURE; // oom or I/O error.
     }
   } else {
+    // Note that this may either be a "fresh" mapping into unreserved address
+    // space (Windows, first mapping attempt), or a mapping into pre-reserved
+    // space (Posix). See also comment in MetaspaceShared::map_archives().
     char* base = os::map_memory(_fd, _full_path, si->file_offset(),
                                 requested_addr, size, si->read_only(),
-                                si->allow_exec());
+                                si->allow_exec(), mtClassShared);
     if (base != requested_addr) {
       log_info(cds)("Unable to map %s shared space at " INTPTR_FORMAT,
                     shared_region_name[i], p2i(requested_addr));
@@ -1528,14 +1526,6 @@
   }
 
   si->set_mapped_base(requested_addr);
-  if (!rs.is_reserved()) {
-    // When mapping on Windows for the first attempt, we don't reserve the address space for the regions
-    // (Windows can't mmap into a ReservedSpace). In this case, NMT requires we call it after
-    // os::map_memory has succeeded.
-    assert(MetaspaceShared::use_windows_memory_mapping(), "Windows memory mapping only");
-    MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared);
-  }
-
   if (VerifySharedSpaces && !verify_region_checksum(i)) {
     return MAP_ARCHIVE_OTHER_FAILURE;
   }
@@ -1552,7 +1542,7 @@
   bool read_only = true, allow_exec = false;
   char* requested_addr = NULL; // allow OS to pick any location
   char* bitmap_base = os::map_memory(_fd, _full_path, si->file_offset(),
-                                     requested_addr, si->used_aligned(), read_only, allow_exec);
+                                     requested_addr, si->used_aligned(), read_only, allow_exec, mtClassShared);
   if (bitmap_base == NULL) {
     log_error(cds)("failed to map relocation bitmap");
     return NULL;
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/share/memory/metaspaceShared.cpp
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Sat May 23 09:38:36 2020 +0200
@@ -2500,6 +2500,10 @@
   assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity");
   assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity");
 
+  // NMT: fix up the space tags
+  MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
+  MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
+
   return archive_space_rs.base();
 
 #else
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/share/memory/virtualspace.hpp
--- a/src/hotspot/share/memory/virtualspace.hpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/share/memory/virtualspace.hpp	Sat May 23 09:38:36 2020 +0200
@@ -78,6 +78,8 @@
   // This splits the space into two spaces, the first part of which will be returned.
   // If split==true, the resulting two spaces can be released independently from each other.
   // This may cause the original space to loose its content.
+  // They also will be tracked individually by NMT and can be tagged with different flags.
+  // Note that this may cause the original space to lose its content.
   // If split==false, the resulting space will be just a hotspot-internal representation
   // of a sub section of the underlying mapping.
   ReservedSpace first_part(size_t partition_size, size_t alignment, bool split = false);
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/share/runtime/os.cpp
--- a/src/hotspot/share/runtime/os.cpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/share/runtime/os.cpp	Sat May 23 09:38:36 2020 +0200
@@ -1763,10 +1763,10 @@
 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                     bool allow_exec, MEMFLAGS flags) {
   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                                allow_exec);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags);
   }
   return result;
 }
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/share/runtime/os.hpp
--- a/src/hotspot/share/runtime/os.hpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/share/runtime/os.hpp	Sat May 23 09:38:36 2020 +0200
@@ -326,7 +326,8 @@
   // Both base and split point must be aligned to allocation granularity; split point shall
   // be >0 and <size.
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/share/services/virtualMemoryTracker.cpp
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp	Sat May 23 09:38:36 2020 +0200
@@ ... @@
       reserved_rgn->set_call_stack(stack);
       reserved_rgn->set_flag(flag);
       return true;
-    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
-      VirtualMemorySummary::record_reserved_memory(size, flag);
-      reserved_rgn->expand_region(base_addr, size);
-      reserved_rgn->set_call_stack(stack);
-      return true;
     } else {
+      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");
+      // Overlapped reservation.
       // It can happen when the regions are thread stacks, as JNI
       // thread does not detach from VM before exits, and leads to
@@ -491,6 +488,30 @@
   }
 }
 
+// Given an existing memory mapping registered with NMT, split the mapping in
+// two. The two new mappings will be registered under the call stack and the
+// memory flags of the original region.
+bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split) {
+
+  ReservedMemoryRegion  rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
+  assert(reserved_rgn != NULL, "No reserved region");
+  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
+  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");
+
+  NativeCallStack original_stack = *reserved_rgn->call_stack();
+  MEMFLAGS original_flags = reserved_rgn->flag();
+
+  _reserved_regions->remove(rgn);
+
+  // Now, create two new regions.
+  add_reserved_region(addr, split, original_stack, original_flags);
+  add_reserved_region(addr + split, size - split, original_stack, original_flags);
+
+  return true;
+}
+
+
 // Iterate the range, find committed region within its bound.
 class RegionIterator : public StackObj {
  private:
diff -r 50029a35de43 -r 494406c5e45c src/hotspot/share/services/virtualMemoryTracker.hpp
--- a/src/hotspot/share/services/virtualMemoryTracker.hpp	Sat May 23 08:05:48 2020 +0200
+++ b/src/hotspot/share/services/virtualMemoryTracker.hpp	Sat May 23 09:38:36 2020 +0200
@@ -210,11 +210,8 @@
   inline bool overlap_region(address addr, size_t sz) const {
     assert(sz > 0, "Invalid size");
     assert(size() > 0, "Invalid size");
-    VirtualMemoryRegion rgn(addr, sz);
     return contain_address(addr) ||
-           contain_address(addr + sz - 1) ||
-           rgn.contain_address(base()) ||
-           rgn.contain_address(end() - 1);
+           contain_address(addr + sz - 1);
   }
 
   inline bool adjacent_to(address addr, size_t sz) const {
@@ -240,6 +237,24 @@
     set_size(size() + sz);
   }
 
+  // Returns 0 if regions overlap; 1 if this region follows rgn;
+  // -1 if this region precedes rgn.
+  inline int compare(const VirtualMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size())) {
+      return 0;
+    } else if (base() >= rgn.end()) {
+      return 1;
+    } else {
+      assert(rgn.base() >= end(), "Sanity");
+      return -1;
+    }
+  }
+
+  // Returns true if regions overlap, false otherwise.
+  inline bool equals(const VirtualMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
 protected:
   void set_base(address base) {
     assert(base != NULL, "Sanity check");
@@ -261,24 +276,6 @@
   CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
     VirtualMemoryRegion(addr, size), _stack(stack) { }
 
-  inline int compare(const CommittedMemoryRegion& rgn) const {
-    if (overlap_region(rgn.base(), rgn.size())) {
-      return 0;
-    } else {
-      if (base() == rgn.base()) {
-        return 0;
-      } else if (base() > rgn.base()) {
-        return 1;
-      } else {
-        return -1;
-      }
-    }
-  }
-
-  inline bool equals(const CommittedMemoryRegion& rgn) const {
-    return compare(rgn) == 0;
-  }
-
   inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
   inline const NativeCallStack* call_stack() const { return &_stack; }
 };
@@ -316,24 +313,6 @@
   void  set_flag(MEMFLAGS flag);
   inline MEMFLAGS flag() const { return _flag; }
 
-  inline int compare(const ReservedMemoryRegion& rgn) const {
-    if (overlap_region(rgn.base(), rgn.size())) {
-      return 0;
-    } else {
-      if (base() == rgn.base()) {
-        return 0;
-      } else if (base() > rgn.base()) {
-        return 1;
-      } else {
-        return -1;
-      }
-    }
-  }
-
-  inline bool equals(const ReservedMemoryRegion& rgn) const {
-    return compare(rgn) == 0;
-  }
-
   // uncommitted thread stack bottom, above guard pages if there is any.
   address thread_stack_uncommitted_bottom() const;
@@ -405,6 +384,11 @@
   static bool remove_released_region            (address base_addr, size_t size);
   static void set_reserved_region_type          (address addr, MEMFLAGS flag);
 
+  // Given an existing memory mapping registered with NMT, split the mapping in
+  // two. The two new mappings will be registered under the call stack and the
+  // memory flags of the original region.
+  static bool split_reserved_region(address addr, size_t size, size_t split);
+
   // Walk virtual memory data structure for creating baseline, etc.
   static bool walk_virtual_memory(VirtualMemoryWalker* walker);
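
For illustration, here is a minimal standalone sketch of the bookkeeping the new
VirtualMemoryTracker::split_reserved_region() performs. This is not HotSpot code:
RegionTracker, Region and MemFlag are invented stand-ins, and std::map replaces
NMT's sorted region list. It only mirrors the sequence the patch implements: the
original reservation is deregistered, two regions inheriting its call-stack/flag
are registered in its place, and each part can then be retagged independently
(as metaspaceShared.cpp does with mtClassShared and mtClass).

// Illustrative model only -- all names below are invented, not HotSpot's.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <map>

enum class MemFlag { None, ClassShared, Class };   // stand-in for MEMFLAGS

struct Region { std::size_t size; MemFlag flag; };

class RegionTracker {                              // stand-in for VirtualMemoryTracker
  std::map<std::size_t, Region> _regions;          // base address -> region
public:
  void add_reserved(std::size_t base, std::size_t size, MemFlag flag) {
    _regions[base] = Region{size, flag};
  }
  void set_flag(std::size_t base, MemFlag flag) {  // ~ record_virtual_memory_type()
    _regions.at(base).flag = flag;
  }
  // Replace the tracked mapping [base, base+size) by [base, base+split) and
  // [base+split, base+size); both parts inherit the original flag, analogous
  // to record_virtual_memory_split_reserved() in the patch.
  bool split_reserved(std::size_t base, std::size_t size, std::size_t split) {
    auto it = _regions.find(base);
    assert(it != _regions.end() && it->second.size == size);
    assert(split > 0 && split < size);             // split point >0 and <size
    MemFlag flag = it->second.flag;
    _regions.erase(it);
    add_reserved(base, split, flag);
    add_reserved(base + split, size - split, flag);
    return true;
  }
  void print() const {
    for (const auto& e : _regions) {
      std::printf("[0x%zx .. 0x%zx) flag=%d\n",
                  e.first, e.first + e.second.size, (int)e.second.flag);
    }
  }
};

int main() {
  RegionTracker nmt;
  nmt.add_reserved(0x100000, 0x40000, MemFlag::None);  // one large reservation
  nmt.split_reserved(0x100000, 0x40000, 0x30000);      // split into two parts
  nmt.set_flag(0x100000, MemFlag::ClassShared);        // retag first part
  nmt.set_flag(0x130000, MemFlag::Class);              // retag second part
  nmt.print();  // prints two independent regions with their own flags
}

Without the split registration, a later release of just one part would not match
any region NMT has on record, which is the kind of inconsistency this patch
addresses; on Windows no extra registration is needed because the split is
implemented by releasing and re-reserving, so NMT already sees two mappings.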