/*
 * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "memory/metaspace.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "services/virtualMemoryTracker.hpp"

size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

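// A committed range can be merged into an existing region only if it is
// adjacent to that region and was committed from the same call stack.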
static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}

static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
  // It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
  return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}

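// Walk the list starting at 'from' and return the last node whose region
// ends at or before 'addr', or NULL if no such node exists.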
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
  LinkedListNode<CommittedMemoryRegion>* preceding = NULL;

  for (LinkedListNode<CommittedMemoryRegion>* node = from; node != NULL; node = node->next()) {
    CommittedMemoryRegion* rgn = node->data();

    // We searched past the region start.
    if (rgn->end() > addr) {
      break;
    }

    preceding = node;
  }

  return preceding;
}

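// If the region held by 'node' is mergeable with [addr, addr + size) committed
// from 'stack', expand it to cover the new range and report success.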
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();

    if (is_mergeable_with(rgn, addr, size, stack)) {
      rgn->expand_region(addr, size);
      return true;
    }
  }

  return false;
}

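// Convenience overload: try to merge the region held by 'other' into the
// region held by 'node'.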
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
  if (other == NULL) {
    return false;
  }

  CommittedMemoryRegion* rgn = other->data();
  return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}

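// Record a committed range within this reserved region. Overlapping committed
// regions are first removed, then the new range is either merged into an
// adjacent region with a matching call stack or inserted as a new region.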
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Find the region that fully precedes the [addr, addr + size) region.
  LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
  LinkedListNode<CommittedMemoryRegion>* next = (prev != NULL ? prev->next() : _committed_regions.head());

  if (next != NULL) {
    // Ignore request if region already exists.
    if (is_same_as(next->data(), addr, size, stack)) {
      return true;
    }

    // The new region is after prev, and either overlaps with the
    // next region (and maybe more regions), or overlaps with no region.
    if (next->data()->overlap_region(addr, size)) {
      // Remove _all_ overlapping regions, and parts of regions,
      // in preparation for the addition of this new region.
      remove_uncommitted_region(addr, size);

      // The remove could have split a region into two and created a
      // new prev region. Need to reset the prev and next pointers.
      prev = find_preceding_node_from((prev != NULL ? prev : _committed_regions.head()), addr);
      next = (prev != NULL ? prev->next() : _committed_regions.head());
    }
  }

  // At this point the previous overlapping regions have been
  // cleared, and the full region is guaranteed to be inserted.
  VirtualMemorySummary::record_committed_memory(size, flag());

  // Try to merge with prev and possibly next.
  if (try_merge_with(prev, addr, size, stack)) {
    if (try_merge_with(prev, next)) {
      // prev was expanded to contain the new region
      // and next, need to remove next from the list
      _committed_regions.remove_after(prev);
    }

    return true;
  }

  // Didn't merge with prev, try with next.
  if (try_merge_with(next, addr, size, stack)) {
    return true;
  }

  // Couldn't merge with any regions - create a new region.
  return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}

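// Carve the range [addr, addr + size) out of the committed region held by
// 'node'. The range must be strictly inside the region; if it touches neither
// end, the region is split and the upper part is inserted as a new node.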
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // split this region
    address top = rgn->end();
    // use this region for lower part
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // higher part
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }

  return false;
}

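// Remove the range [addr, addr + sz) from this region's committed list,
// trimming, splitting or deleting committed regions as needed and updating
// the virtual memory summary accordingly.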
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
  address end = addr + sz;

  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;
  CommittedMemoryRegion* crgn;

  while (head != NULL) {
    crgn = head->data();

    if (crgn->same_region(addr, sz)) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      _committed_regions.remove_after(prev);
      return true;
    }

    // del_rgn contains crgn
    if (del_rgn.contain_region(crgn->base(), crgn->size())) {
      VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
      head = head->next();
      _committed_regions.remove_after(prev);
      continue;  // head was already advanced above; prev stays where it is
    }

    // Found addr in the current crgn. There are 2 subcases:
    if (crgn->contain_address(addr)) {

      // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
      if (crgn->contain_address(end - 1)) {
        VirtualMemorySummary::record_uncommitted_memory(sz, flag());
        return remove_uncommitted_region(head, addr, sz); // done!
      } else {
        // (2) Did not find del_rgn's end in crgn.
        size_t size = crgn->end() - del_rgn.base();
        crgn->exclude_region(addr, size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

    } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
      size_t size = del_rgn.end() - crgn->base();
      crgn->exclude_region(crgn->base(), size);
      VirtualMemorySummary::record_uncommitted_memory(size, flag());
      return true;  // should be done if the list is sorted properly!
    }

    prev = head;
    head = head->next();
  }

  return true;
}

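// Split off the committed regions that start at or above 'addr' and hand
// them over to 'rgn'; used when a reserved region is split by a release.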
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // split committed regions
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    if (prev != NULL) {
      prev->set_next(head->next());
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}

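// Total size of all committed regions within this reserved region.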
size_t ReservedMemoryRegion::committed_size() const {
  size_t committed = 0;
  LinkedListNode<CommittedMemoryRegion>* head =
    _committed_regions.head();
  while (head != NULL) {
    committed += head->data()->size();
    head = head->next();
  }
  return committed;
}

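// Set the memory type of this region. Only an untyped (mtNone) region may be
// retyped; the recorded reserved and committed sizes move to the new type.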
void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

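// Late phase of initialization; allocates the sorted list of reserved
// regions on the C heap (tagged mtNMT).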
bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

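// Record a virtual memory reservation. A new region is added to the sorted
// list; a reservation that matches or adjoins an existing region updates or
// extends that region instead. A few special overlap cases (thread stacks,
// CDS mappings) are tolerated below.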
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
    const NativeCallStack& stack, MEMFLAGS flag) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    return _reserved_regions->add(rgn) != NULL;
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapping reservation.
      // This can happen when the regions are thread stacks: an attached JNI
      // thread that exits without detaching from the VM leaks its JavaThread
      // object, and with it the tracked stack region.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) are part of the Java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      ShouldNotReachHere();
      return false;
    }
  }
}

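// Tag the reserved region containing 'addr' with the given memory type.
// Does nothing if no tracked region contains that address.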
void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

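// Record that [addr, addr + size) was committed; the range must fall inside
// a region that was previously recorded as reserved.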
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->add_committed_region(addr, size, stack);
  return result;
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  bool result = reserved_rgn->remove_uncommitted_region(addr, size);
  return result;
}

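// Record that [addr, addr + size) was released. The matching reserved region
// is removed, shrunk, or split, and any committed regions within the released
// range are uncommitted first.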
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

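// Visit all tracked reserved regions under ThreadCritical protection,
// stopping early if the walker returns false.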
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for potential race with another thread calling transition
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}

// Metaspace Support
MetaspaceSnapshot::MetaspaceSnapshot() {
  for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index++) {
    Metaspace::MetadataType type = (Metaspace::MetadataType)index;
    assert_valid_metadata_type(type);
    _reserved_in_bytes[type]  = 0;
    _committed_in_bytes[type] = 0;
    _used_in_bytes[type]      = 0;
    _free_in_bytes[type]      = 0;
  }
}

void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
  assert_valid_metadata_type(type);

  mss._reserved_in_bytes[type]   = MetaspaceAux::reserved_bytes(type);
  mss._committed_in_bytes[type]  = MetaspaceAux::committed_bytes(type);
  mss._used_in_bytes[type]       = MetaspaceAux::used_bytes(type);

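  // Free metaspace is approximated as the unused capacity within the chunks
  // currently in use, plus the chunks sitting on the free lists, plus freed
  // space not yet reclaimed.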
  size_t free_in_bytes = (MetaspaceAux::capacity_bytes(type) - MetaspaceAux::used_bytes(type))
                       + MetaspaceAux::free_chunks_total_bytes(type)
                       + MetaspaceAux::free_bytes(type);
  mss._free_in_bytes[type] = free_in_bytes;
}

void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
  snapshot(Metaspace::ClassType, mss);
  if (Metaspace::using_class_space()) {
    snapshot(Metaspace::NonClassType, mss);
  }
}