1 /*
   2  * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 #include "precompiled.hpp"
  25 
  26 #include "memory/metaspace.hpp"
  27 #include "runtime/atomic.hpp"
  28 #include "runtime/os.hpp"
  29 #include "runtime/threadCritical.hpp"
  30 #include "services/memTracker.hpp"
  31 #include "services/virtualMemoryTracker.hpp"
  32 
// Backing storage for the singleton VirtualMemorySnapshot, expressed in
// size_t units so the buffer is suitably aligned for placement new below.
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

// Construct the snapshot object inside the pre-allocated static buffer.
void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity Check");
  // Use placement operator new to initialize static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}
  40 
// Global sorted list of all reserved regions, ordered by base address.
// Allocated lazily in VirtualMemoryTracker::late_initialize().
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

// Ordering function for committed regions (delegates to MemoryRegion::compare).
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

// Ordering function for reserved regions (delegates to MemoryRegion::compare).
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}
  50 
// Record that [addr, addr + size) inside this reserved region has been
// committed. Walks the sorted committed-region list and either merges the
// new range into an adjacent tail region (same call stack), carves out any
// overlap first, or appends it as a new committed region. Returns true on
// success, false only if adding a new list node fails.
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Not contain this region");

  // Fully-committed regions track no individual committed sub-regions.
  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();

  while (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    // Exact range already tracked: nothing to do.
    if (rgn->same_region(addr, size)) {
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // special case to expand prior region if there is no next region
      LinkedListNode<CommittedMemoryRegion>* next = node->next();
      if (next == NULL && rgn->call_stack()->equals(stack)) {
        // Adjust summary accounting around the in-place expansion:
        // retract the old size, then record the merged size.
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // the two adjacent regions have the same call stack, merge them
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
      }

    if (rgn->overlap_region(addr, size)) {
      // Clear a space for this region in the case it overlaps with any regions.
      remove_uncommitted_region(addr, size);
      break;  // commit below
    }
    // List is address-sorted: once past the new range's end, stop scanning.
    if (rgn->end() >= addr + size){
      break;
    }
    node = node->next();
  }

    // New committed region
    VirtualMemorySummary::record_committed_memory(size, flag());
    return add_committed_region(committed_rgn);
  }
  94 
  95 void ReservedMemoryRegion::set_all_committed(bool b) {
  96   if (all_committed() != b) {
  97     _all_committed = b;
  98     if (b) {
  99       VirtualMemorySummary::record_committed_memory(size(), flag());
 100     }
 101   }
 102 }
 103 
 104 bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
 105   address addr, size_t size) {
 106   assert(addr != NULL, "Invalid address");
 107   assert(size > 0, "Invalid size");
 108 
 109   CommittedMemoryRegion* rgn = node->data();
 110   assert(rgn->contain_region(addr, size), "Has to be contained");
 111   assert(!rgn->same_region(addr, size), "Can not be the same region");
 112 
 113   if (rgn->base() == addr ||
 114       rgn->end() == addr + size) {
 115     rgn->exclude_region(addr, size);
 116     return true;
 117   } else {
 118     // split this region
 119     address top =rgn->end();
 120     // use this region for lower part
 121     size_t exclude_size = rgn->end() - addr;
 122     rgn->exclude_region(addr, exclude_size);
 123 
 124     // higher part
 125     address high_base = addr + size;
 126     size_t  high_size = top - high_base;
 127 
 128     CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
 129     LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
 130     assert(high_node == NULL || node->next() == high_node, "Should be right after");
 131     return (high_node != NULL);
 132   }
 133 
 134   return false;
 135 }
 136 
// Remove the range [addr, addr + sz) from this region's committed accounting.
// Handles both the all-committed fast path (which may require materializing
// the remaining committed pieces as explicit regions) and the general case of
// trimming/splitting/deleting entries in the sorted committed-region list.
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  // uncommit stack guard pages
  if (flag() == mtThreadStack && !same_region(addr, sz)) {
    return true;
  }

  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  if (all_committed()) {
    assert(_committed_regions.is_empty(), "Sanity check");
    assert(contain_region(addr, sz), "Reserved region does not contain this region");
    // Drop the all-committed shortcut and account the uncommit, then
    // re-create explicit committed regions for whatever remains committed.
    set_all_committed(false);
    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
    if (same_region(addr, sz)) {
      // The whole region was uncommitted: nothing remains to track.
      return true;
    } else {
      // Start from a region covering the whole reservation and exclude
      // the uncommitted range from it.
      CommittedMemoryRegion rgn(base(), size(), *call_stack());
      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
        // Range touches one end: one remaining committed piece.
        rgn.exclude_region(addr, sz);
        return add_committed_region(rgn);
      } else {
        // split this region
        // top of the whole region
        address top =rgn.end();
        // use this region for lower part
        size_t exclude_size = rgn.end() - addr;
        rgn.exclude_region(addr, exclude_size);
        if (add_committed_region(rgn)) {
          // higher part
          address high_base = addr + sz;
          size_t  high_size = top - high_base;
          CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK);
          return add_committed_region(high_rgn);
        } else {
          return false;
        }
      }
    }
  } else {
    // General case: walk the sorted committed-region list and remove the
    // overlap of [addr, addr + sz) with each entry.
    CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
    address end = addr + sz;

    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
    CommittedMemoryRegion* crgn;

    while (head != NULL) {
      crgn = head->data();

      // Exact match: delete the node and we are done.
      if (crgn->same_region(addr, sz)) {
        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          _committed_regions.remove_after(prev);
          return true;
      }

      // del_rgn contains crgn
      if (del_rgn.contain_region(crgn->base(), crgn->size())) {
          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          head = head->next();
          _committed_regions.remove_after(prev);
        continue;  // don't update head or prev
        }

      // Found addr in the current crgn. There are 2 subcases:
      if (crgn->contain_address(addr)) {

        // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
        if (crgn->contain_address(end - 1)) {
          VirtualMemorySummary::record_uncommitted_memory(sz, flag());
          return remove_uncommitted_region(head, addr, sz); // done!
        } else {
          // (2) Did not find del_rgn's end in crgn.
          // Trim crgn's tail; later iterations handle the rest of del_rgn.
          size_t size = crgn->end() - del_rgn.base();
          crgn->exclude_region(addr, size);
          VirtualMemorySummary::record_uncommitted_memory(size, flag());
      }

      } else if (crgn->contain_address(end - 1)) {
      // Found del_rgn's end, but not its base addr.
        size_t size = del_rgn.end() - crgn->base();
        crgn->exclude_region(crgn->base(), size);
        VirtualMemorySummary::record_uncommitted_memory(size, flag());
        return true;  // should be done if the list is sorted properly!
      }

      prev = head;
      head = head->next();
    }
  }

  return true;
}
 230 
 231 void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
 232   assert(addr != NULL, "Invalid address");
 233 
 234   // split committed regions
 235   LinkedListNode<CommittedMemoryRegion>* head =
 236     _committed_regions.head();
 237   LinkedListNode<CommittedMemoryRegion>* prev = NULL;
 238 
 239   while (head != NULL) {
 240     if (head->data()->base() >= addr) {
 241       break;
 242     }
 243     prev = head;
 244     head = head->next();
 245   }
 246 
 247   if (head != NULL) {
 248     if (prev != NULL) {
 249       prev->set_next(head->next());
 250     } else {
 251       _committed_regions.set_head(NULL);
 252     }
 253   }
 254 
 255   rgn._committed_regions.set_head(head);
 256 }
 257 
 258 size_t ReservedMemoryRegion::committed_size() const {
 259   if (all_committed()) {
 260     return size();
 261   } else {
 262     size_t committed = 0;
 263     LinkedListNode<CommittedMemoryRegion>* head =
 264       _committed_regions.head();
 265     while (head != NULL) {
 266       committed += head->data()->size();
 267       head = head->next();
 268     }
 269     return committed;
 270   }
 271 }
 272 
 273 void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
 274   assert((flag() == mtNone || flag() == f), "Overwrite memory type");
 275   if (flag() != f) {
 276     VirtualMemorySummary::move_reserved_memory(flag(), f, size());
 277     VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
 278     _flag = f;
 279   }
 280 }
 281 
 282 bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
 283   if (level >= NMT_summary) {
 284     VirtualMemorySummary::initialize();
 285   }
 286   return true;
 287 }
 288 
 289 bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
 290   if (level >= NMT_summary) {
 291     _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
 292       SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
 293     return (_reserved_regions != NULL);
 294   }
 295   return true;
 296 }
 297 
// Register a newly reserved region [base_addr, base_addr + size) with the
// tracker. Handles the normal (non-overlapping) case plus several special
// overlap cases: exact re-reservation, adjacent expansion, leaked JNI thread
// stacks, CDS archive mappings, and CDS string regions inside the Java heap.
// Returns false only if adding a new list node fails.
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    // Fresh reservation: account it and insert into the sorted list.
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions->add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      // Re-reservation of the identical range: refresh stack and type.
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      // Grow the existing region to cover the adjacent range.
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation.
      // It can happen when the regions are thread stacks, as JNI
      // thread does not detach from VM before exits, and leads to
      // leak JavaThread object
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Any other overlap is a tracking inconsistency.
      ShouldNotReachHere();
      return false;
    }
  }
}
 365 
 366 void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
 367   assert(addr != NULL, "Invalid address");
 368   assert(_reserved_regions != NULL, "Sanity check");
 369 
 370   ReservedMemoryRegion   rgn(addr, 1);
 371   ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
 372   if (reserved_rgn != NULL) {
 373     assert(reserved_rgn->contain_address(addr), "Containment");
 374     if (reserved_rgn->flag() != flag) {
 375       assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
 376       reserved_rgn->set_flag(flag);
 377     }
 378   }
 379 }
 380 
 381 bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
 382   const NativeCallStack& stack) {
 383   assert(addr != NULL, "Invalid address");
 384   assert(size > 0, "Invalid size");
 385   assert(_reserved_regions != NULL, "Sanity check");
 386 
 387   ReservedMemoryRegion  rgn(addr, size);
 388   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
 389 
 390   assert(reserved_rgn != NULL, "No reserved region");
 391   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
 392   bool result = reserved_rgn->add_committed_region(addr, size, stack);
 393   return result;
 394 }
 395 
 396 bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
 397   assert(addr != NULL, "Invalid address");
 398   assert(size > 0, "Invalid size");
 399   assert(_reserved_regions != NULL, "Sanity check");
 400 
 401   ReservedMemoryRegion  rgn(addr, size);
 402   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
 403   assert(reserved_rgn != NULL, "No reserved region");
 404   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
 405   bool result = reserved_rgn->remove_uncommitted_region(addr, size);
 406   return result;
 407 }
 408 
// Record a release (unreserve) of [addr, addr + size). Uncommits any
// committed memory inside the range first, then either removes the tracked
// reserved region, trims it, or splits it into two regions depending on how
// the released range sits within it. CDS sub-region unmaps are tolerated
// without altering the reserved region.
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See special handling in VirtualMemoryTracker::add_reserved_region also.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    // Whole region released: drop it from the list.
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
        // Released range touches one end: shrink the region in place.
        reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      // Released range is in the interior: split into lower and upper regions.
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // use original region for lower region
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        // Hand the committed regions above the split point to the new region.
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}
 461 
 462 
 463 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
 464   assert(_reserved_regions != NULL, "Sanity check");
 465   ThreadCritical tc;
 466   // Check that the _reserved_regions haven't been deleted.
 467   if (_reserved_regions != NULL) {
 468     LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
 469     while (head != NULL) {
 470       const ReservedMemoryRegion* rgn = head->peek();
 471       if (!walker->do_allocation_site(rgn)) {
 472         return false;
 473       }
 474       head = head->next();
 475     }
 476    }
 477   return true;
 478 }
 479 
 480 // Transition virtual memory tracking level.
 481 bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
 482   assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
 483   if (to == NMT_minimal) {
 484     assert(from == NMT_summary || from == NMT_detail, "Just check");
 485     // Clean up virtual memory tracking data structures.
 486     ThreadCritical tc;
 487     // Check for potential race with other thread calling transition
 488     if (_reserved_regions != NULL) {
 489       delete _reserved_regions;
 490       _reserved_regions = NULL;
 491     }
 492   }
 493 
 494   return true;
 495 }
 496 
 497 // Metaspace Support
 498 MetaspaceSnapshot::MetaspaceSnapshot() {
 499   for (int index = (int)Metaspace::ClassType; index < (int)Metaspace::MetadataTypeCount; index ++) {
 500     Metaspace::MetadataType type = (Metaspace::MetadataType)index;
 501     assert_valid_metadata_type(type);
 502     _reserved_in_bytes[type]  = 0;
 503     _committed_in_bytes[type] = 0;
 504     _used_in_bytes[type]      = 0;
 505     _free_in_bytes[type]      = 0;
 506   }
 507 }
 508 
 509 void MetaspaceSnapshot::snapshot(Metaspace::MetadataType type, MetaspaceSnapshot& mss) {
 510   assert_valid_metadata_type(type);
 511 
 512   mss._reserved_in_bytes[type]   = MetaspaceAux::reserved_bytes(type);
 513   mss._committed_in_bytes[type]  = MetaspaceAux::committed_bytes(type);
 514   mss._used_in_bytes[type]       = MetaspaceAux::used_bytes(type);
 515 
 516   size_t free_in_bytes = (MetaspaceAux::capacity_bytes(type) - MetaspaceAux::used_bytes(type))
 517                        + MetaspaceAux::free_chunks_total_bytes(type)
 518                        + MetaspaceAux::free_bytes(type);
 519   mss._free_in_bytes[type] = free_in_bytes;
 520 }
 521 
 522 void MetaspaceSnapshot::snapshot(MetaspaceSnapshot& mss) {
 523   snapshot(Metaspace::ClassType, mss);
 524   if (Metaspace::using_class_space()) {
 525     snapshot(Metaspace::NonClassType, mss);
 526   }
 527 }