/*
 * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/threadStackTracker.hpp"
#include "services/virtualMemoryTracker.hpp"
#include "utilities/ostream.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
  // Only if thread stack is backed by virtual memory
  if (ThreadStackTracker::track_as_vm()) {
    // Snapshot current thread stacks
    VirtualMemoryTracker::snapshot_thread_stacks();
  }
  as_snapshot()->copy_to(s);
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}
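// Note on the data structure (a summary, not from the original comments): the
// committed regions of a ReservedMemoryRegion are held in a list sorted by
// base address (see compare_committed_region above) and kept disjoint. Two
// touching regions are only merged when they were committed from call sites
// with equal NativeCallStacks, so that detail reports can still attribute
// each committed range to a single stack. The helpers below implement the
// lookup and merge steps of that scheme.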
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = NULL;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == NULL) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Must contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());

  if (next != NULL) {
    // Ignore the request if the region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
      next = (prev != NULL ? prev->next() : _committed_regions.head());
    }
  }

  // At this point any previously overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next; need to remove next from the list.
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}
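// Worked example for the merge logic above (illustrative addresses, not taken
// from a real log): suppose the committed list holds { [0x1000, 0x3000) @ S1 }
// and a commit of 0x1000 bytes at 0x3000 is recorded:
//  - with stack S1, the request is adjacent to the existing node and the
//    stacks are equal, so the node simply expands to [0x1000, 0x4000);
//  - with a different stack S2, no merge is possible and a new node
//    [0x3000, 0x4000) @ S2 is inserted, keeping the list sorted by base.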
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Cannot be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    // The removed range is at either end of this region; just shrink it.
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // The removed range is in the middle: split this region in two.
    address top = rgn->end();
    // Use this region for the lower part.
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // Create a new region for the higher part.
    address high_base = addr + size;
    size_t high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }
}
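// Example of the split case above (illustrative addresses): uncommitting
// [0x2000, 0x3000) from the committed region [0x1000, 0x4000) shrinks the
// existing node to [0x1000, 0x2000) and adds a new node [0x3000, 0x4000)
// that inherits the original region's call stack.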
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  CommittedMemoryRegion* crgn;

  while (head != NULL) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue; // head already advanced; prev stays in place
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true; // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // Split the committed regions at addr.
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}
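// Sketch of the intent (illustrative): with a committed list
// { [0x1000, 0x2000), [0x3000, 0x4000) } and addr = 0x2800, the walk stops at
// [0x3000, 0x4000); that node becomes the head of rgn's committed list, so
// committed memory at or above addr is accounted to the new (higher) region.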
size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f),
         "Overwrite memory type for region [" PTR_FORMAT "-" PTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(flag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != NULL) {
    address committed_top = head->data()->base() + head->data()->size();
    if (committed_top < top) {
      // Committed stack guard pages, skip them.
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
    } else {
      assert(top == committed_top, "Sanity");
      break;
    }
  }

  return bottom;
}
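// Rough picture of the layout this relies on (illustrative; stacks grow
// downward from base() + size()):
//
//   base()                                                base() + size()
//   +--guard pages--+------ uncommitted ------+--- committed, in use ---+
//                   ^
//                   value returned above
//
// Everything from the returned bottom up to the stack top is rescanned by
// snapshot_thread_stacks() below to find the currently committed ranges.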
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
  const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != NULL;
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that exits without detaching from the VM leaks its JavaThread
      // object, including the stack region recorded here.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite the old region with the new one.

        // Release the old region.
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add the new region.
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Print some more details. Don't use UL here to avoid circularities.
#ifdef ASSERT
      tty->print_cr("Error: existing region: [" PTR_FORMAT "-" PTR_FORMAT "), flag %u.\n"
                    "       new region: [" PTR_FORMAT "-" PTR_FORMAT "), flag %u.",
                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
                    p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
#endif
      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, 1);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
             NMTUtil::flag_to_name(reserved_rgn->flag()));
      reserved_rgn->set_flag(flag);
    }
  }
}

bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  return result;
}

bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // Uncommit regions within the released region.
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // Use the original region for the lower region.
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

// Given an existing memory mapping registered with NMT, split the mapping into
// two. The two new mappings are registered under the call stack and the
// memory flags of the original region.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split) {

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  NativeCallStack original_stack = *reserved_rgn->call_stack();
  MEMFLAGS original_flags = reserved_rgn->flag();

  remove_released_region(addr, size);

  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, original_flags);
  add_reserved_region(addr + split, size - split, original_stack, original_flags);

  return true;
}
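// Example (illustrative): split_reserved_region(A, 0x4000, 0x1000) unregisters
// the reserved region [A, A+0x4000) and re-registers [A, A+0x1000) and
// [A+0x1000, A+0x4000), both under the original region's call stack and flags.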
// Iterate over the given address range and locate the committed regions
// within its bounds.
class RegionIterator : public StackObj {
private:
  const address _start;
  const size_t _size;

  address _current_start;
  size_t _current_size;
public:
  RegionIterator(address start, size_t size) :
    _start(start), _size(size), _current_start(start), _current_size(size) {
  }

  // Returns true if a committed region is found.
  bool next_committed(address& start, size_t& size);
private:
  address end() const { return _start + _size; }
};

bool RegionIterator::next_committed(address& committed_start, size_t& committed_size) {
  if (end() <= _current_start) return false;

  const size_t page_sz = os::vm_page_size();
  assert(_current_start + _current_size == end(), "Must be");
  if (os::committed_in_range(_current_start, _current_size, committed_start, committed_size)) {
    assert(committed_start != NULL, "Must be");
    assert(committed_size > 0 && is_aligned(committed_size, page_sz), "Must be");

    size_t remaining_size = (_current_start + _current_size) - (committed_start + committed_size);
    _current_start = committed_start + committed_size;
    _current_size = remaining_size;
    return true;
  } else {
    return false;
  }
}

// Walk all known thread stacks, snapshot their committed ranges.
class SnapshotThreadStackWalker : public VirtualMemoryWalker {
public:
  SnapshotThreadStackWalker() {}

  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    if (rgn->flag() == mtThreadStack) {
      address stack_bottom = rgn->thread_stack_uncommitted_bottom();
      address committed_start;
      size_t committed_size;
      size_t stack_size = rgn->base() + rgn->size() - stack_bottom;
      // Align the size to work with full pages (the Alpine and AIX stack top is not page aligned).
      size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());

      ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
      NativeCallStack ncs; // empty stack

      RegionIterator itr(stack_bottom, aligned_stack_size);
      DEBUG_ONLY(bool found_stack = false;)
      while (itr.next_committed(committed_start, committed_size)) {
        assert(committed_start != NULL, "Should not be null");
        assert(committed_size > 0, "Should not be 0");
        // Unaligned stack_size case: trim the region to fit the actual stack_size.
        if (stack_bottom + stack_size < committed_start + committed_size) {
          committed_size = stack_bottom + stack_size - committed_start;
        }
        region->add_committed_region(committed_start, committed_size, ncs);
        DEBUG_ONLY(found_stack = true;)
      }
#ifdef ASSERT
      if (!found_stack) {
        log_debug(thread)("Thread exited without proper cleanup, may leak thread object");
      }
#endif
    }
    return true;
  }
};

void VirtualMemoryTracker::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}

bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}
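// A custom walker follows the same pattern as SnapshotThreadStackWalker
// above. Minimal sketch (illustrative, not part of this file): a walker that
// sums the reserved size of all regions carrying a given flag.
//
//   class SumReservedWalker : public VirtualMemoryWalker {
//     MEMFLAGS _flag;
//     size_t   _total;
//   public:
//     SumReservedWalker(MEMFLAGS flag) : _flag(flag), _total(0) {}
//     bool do_allocation_site(const ReservedMemoryRegion* rgn) {
//       if (rgn->flag() == _flag) {
//         _total += rgn->size();
//       }
//       return true; // keep walking
//     }
//     size_t total() const { return _total; }
//   };
//
// walk_virtual_memory(&walker) invokes do_allocation_site for each reserved
// region while holding ThreadCritical, and stops early if the callback
// returns false.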
// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for a potential race with another thread calling transition.
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}

// Metaspace Support
MetaspaceSnapshot::MetaspaceSnapshot() {
  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index++) {
    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
    assert_valid_metadata_type(type);
    _reserved_in_bytes[type]  = 0;
    _committed_in_bytes[type] = 0;
    _used_in_bytes[type]      = 0;
    _free_in_bytes[type]      = 0;
  }
}

void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
  assert_valid_metadata_type(type);

  mss._reserved_in_bytes[type]  = MetaspaceUtils::reserved_bytes(type);
  mss._committed_in_bytes[type] = MetaspaceUtils::committed_bytes(type);
  mss._used_in_bytes[type]      = MetaspaceUtils::used_bytes(type);

  size_t free_in_bytes = (MetaspaceUtils::capacity_bytes(type) - MetaspaceUtils::used_bytes(type))
                       + MetaspaceUtils::free_chunks_total_bytes(type)
                       + MetaspaceUtils::free_in_vs_bytes(type);
  mss._free_in_bytes[type] = free_in_bytes;
}

void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
  // The non-class space always exists; a separate class space exists only
  // when compressed class pointers are in use.
  snapshot(Metaspace::NonClassType, mss);
  if (Metaspace::using_class_space()) {
    snapshot(Metaspace::ClassType, mss);
  }
}
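// Usage sketch (illustrative; callers such as the NMT report code take a
// point-in-time metaspace snapshot like this):
//
//   MetaspaceSnapshot mss;
//   MetaspaceSnapshot::snapshot(mss);
//   // mss now holds reserved/committed/used/free byte counts per
//   // Metaspace::MetadataType.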