/*
 * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/threadCritical.hpp"
#include "services/virtualMemoryTracker.hpp"

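// Backing storage for the virtual memory summary snapshot, declared as a
// raw static array of size_t words; initialize() constructs the snapshot
// object in place with placement new.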
size_t VirtualMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(VirtualMemorySnapshot, size_t)];

void VirtualMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(VirtualMemorySnapshot), "Sanity check");
  // Use placement new to construct the snapshot in the static data area.
  ::new ((void*)_snapshot) VirtualMemorySnapshot();
}

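// All tracked reserved memory regions, kept sorted by base address.
// Allocated lazily in late_initialize(), once the C heap is usable.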
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;

int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
  return r1.compare(r2);
}

int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
  return r1.compare(r2);
}

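// Record a committed sub-range of this reserved region. The cases are:
//   - the range matches an existing committed region: nothing to do;
//   - the range is adjacent to an existing region with the same call
//     stack: merge the two regions;
//   - the range is adjacent but the call stacks differ: keep a separate
//     region so per-call-site accounting stays correct;
//   - otherwise the range is already covered, or it starts a new region.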
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(contain_region(addr, size), "Does not contain this region");

  if (all_committed()) return true;

  CommittedMemoryRegion committed_rgn(addr, size, stack);
  LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.find_node(committed_rgn);
  if (node != NULL) {
    CommittedMemoryRegion* rgn = node->data();
    if (rgn->same_region(addr, size)) {
      return true;
    }

    if (rgn->adjacent_to(addr, size)) {
      // Check if the next region covers this committed region; adjacent
      // regions may not have been merged because their call stacks differ.
      LinkedListNode<CommittedMemoryRegion>* next = node->next();
      if (next != NULL && next->data()->contain_region(addr, size)) {
        if (next->data()->same_region(addr, size)) {
          next->data()->set_call_stack(stack);
        }
        return true;
      }
      if (rgn->call_stack()->equals(stack)) {
        VirtualMemorySummary::record_uncommitted_memory(rgn->size(), flag());
        // The two adjacent regions have the same call stack, merge them.
        rgn->expand_region(addr, size);
        VirtualMemorySummary::record_committed_memory(rgn->size(), flag());
        return true;
      }
      VirtualMemorySummary::record_committed_memory(size, flag());
      if (rgn->base() > addr) {
        return _committed_regions.insert_before(committed_rgn, node) != NULL;
      } else {
        return _committed_regions.insert_after(committed_rgn, node) != NULL;
      }
    }
    assert(rgn->contain_region(addr, size), "Must cover this region");
    return true;
  } else {
    // New committed region.
    VirtualMemorySummary::record_committed_memory(size, flag());
    return add_committed_region(committed_rgn);
  }
}

void ReservedMemoryRegion::set_all_committed(bool b) {
  if (all_committed() != b) {
    _all_committed = b;
    if (b) {
      VirtualMemorySummary::record_committed_memory(size(), flag());
    }
  }
}

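// Remove an uncommitted range from the committed region held by 'node'.
// If the range touches either end of the region, the region is trimmed;
// otherwise the region is split in two, with the upper part added as a
// new committed region right after 'node'.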
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
  address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");

  CommittedMemoryRegion* rgn = node->data();
  assert(rgn->contain_region(addr, size), "Has to be contained");
  assert(!rgn->same_region(addr, size), "Can not be the same region");

  if (rgn->base() == addr ||
      rgn->end() == addr + size) {
    rgn->exclude_region(addr, size);
    return true;
  } else {
    // Split this region.
    address top = rgn->end();
    // Use this region for the lower part.
    size_t exclude_size = rgn->end() - addr;
    rgn->exclude_region(addr, exclude_size);

    // Add a new region for the higher part.
    address high_base = addr + size;
    size_t  high_size = top - high_base;

    CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
    LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
    assert(high_node == NULL || node->next() == high_node, "Should be right after");
    return (high_node != NULL);
  }
}

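// Remove an uncommitted range from this reserved region. The range may
// span several committed regions, so in the general case we walk the
// committed list and trim, split or delete every region it overlaps.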
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
  // Uncommits of stack guard pages: partial uncommits within a thread
  // stack region are ignored.
  if (flag() == mtThreadStack && !same_region(addr, sz)) {
    return true;
  }

  assert(addr != NULL, "Invalid address");
  assert(sz > 0, "Invalid size");

  if (all_committed()) {
    assert(_committed_regions.is_empty(), "Sanity check");
    assert(contain_region(addr, sz), "Reserved region does not contain this region");
    set_all_committed(false);
    VirtualMemorySummary::record_uncommitted_memory(sz, flag());
    if (same_region(addr, sz)) {
      return true;
    } else {
      CommittedMemoryRegion rgn(base(), size(), *call_stack());
      if (rgn.base() == addr || rgn.end() == (addr + sz)) {
        rgn.exclude_region(addr, sz);
        return add_committed_region(rgn);
      } else {
        // Split this region.
        // Top of the whole region.
        address top = rgn.end();
        // Use this region for the lower part.
        size_t exclude_size = rgn.end() - addr;
        rgn.exclude_region(addr, exclude_size);
        if (add_committed_region(rgn)) {
          // Add the higher part.
          address high_base = addr + sz;
          size_t  high_size = top - high_base;
          CommittedMemoryRegion high_rgn(high_base, high_size, NativeCallStack::EMPTY_STACK);
          return add_committed_region(high_rgn);
        } else {
          return false;
        }
      }
    }
  } else {
    // We have to walk the whole list to remove the committed regions in
    // the specified range.
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    LinkedListNode<CommittedMemoryRegion>* prev = NULL;
    VirtualMemoryRegion uncommitted_rgn(addr, sz);

    while (head != NULL && !uncommitted_rgn.is_empty()) {
      CommittedMemoryRegion* crgn = head->data();
      // This committed region overlaps the region to uncommit.
      if (crgn->overlap_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
        if (crgn->same_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // Found the matching region; removing the node is all that is needed.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          _committed_regions.remove_after(prev);
          return true;
        } else if (crgn->contain_region(uncommitted_rgn.base(), uncommitted_rgn.size())) {
          // This committed region contains the whole uncommitted region.
          VirtualMemorySummary::record_uncommitted_memory(uncommitted_rgn.size(), flag());
          return remove_uncommitted_region(head, uncommitted_rgn.base(), uncommitted_rgn.size());
        } else if (uncommitted_rgn.contain_region(crgn->base(), crgn->size())) {
          // The uncommitted region covers this whole committed region; remove it.
          size_t exclude_size = crgn->end() - uncommitted_rgn.base();
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), exclude_size);
          VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
          head = head->next();
          _committed_regions.remove_after(prev);
          continue;
        } else if (crgn->contain_address(uncommitted_rgn.base())) {
          // The uncommitted region starts inside this committed region;
          // trim the committed region's upper end.
          size_t to_uncommit = crgn->end() - uncommitted_rgn.base();
          crgn->exclude_region(uncommitted_rgn.base(), to_uncommit);
          uncommitted_rgn.exclude_region(uncommitted_rgn.base(), to_uncommit);
          VirtualMemorySummary::record_uncommitted_memory(to_uncommit, flag());
        } else if (uncommitted_rgn.contain_address(crgn->base())) {
          // The uncommitted region ends inside this committed region;
          // trim the committed region's lower end.
          size_t to_uncommit = uncommitted_rgn.end() - crgn->base();
          crgn->exclude_region(crgn->base(), to_uncommit);
          uncommitted_rgn.exclude_region(uncommitted_rgn.end() - to_uncommit,
            to_uncommit);
          VirtualMemorySummary::record_uncommitted_memory(to_uncommit, flag());
        }
      }
      prev = head;
      head = head->next();
    }
  }

  return true;
}

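// Move all committed regions starting at or above 'addr' from this
// reserved region into 'rgn'. Used when a reserved region is split.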
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
  assert(addr != NULL, "Invalid address");

  // Find the first committed region at or above 'addr'; everything from
  // there to the end of the list moves to 'rgn'.
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  LinkedListNode<CommittedMemoryRegion>* prev = NULL;

  while (head != NULL) {
    if (head->data()->base() >= addr) {
      break;
    }
    prev = head;
    head = head->next();
  }

  if (head != NULL) {
    // Detach the tail starting at 'head' from this region's list.
    if (prev != NULL) {
      prev->set_next(NULL);
    } else {
      _committed_regions.set_head(NULL);
    }
  }

  rgn._committed_regions.set_head(head);
}

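// Total committed size within this reserved region.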
size_t ReservedMemoryRegion::committed_size() const {
  if (all_committed()) {
    return size();
  } else {
    size_t committed = 0;
    LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
    while (head != NULL) {
      committed += head->data()->size();
      head = head->next();
    }
    return committed;
  }
}

void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
  assert((flag() == mtNone || flag() == f), "Overwrite memory type");
  if (flag() != f) {
    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
    _flag = f;
  }
}

bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    VirtualMemorySummary::initialize();
  }
  return true;
}

bool VirtualMemoryTracker::late_initialize(NMT_TrackingLevel level) {
  if (level >= NMT_summary) {
    _reserved_regions = new (std::nothrow, ResourceObj::C_HEAP, mtNMT)
      SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
    return (_reserved_regions != NULL);
  }
  return true;
}

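// Record a virtual memory reservation. A new reservation normally gets a
// new region, but the request may also match, extend or overlap an
// existing one. Overlaps are only legal in a few known cases: leaked
// thread stacks from JNI threads that exited without detaching, CDS
// archive mappings inside the pre-reserved shared region, and CDS string
// regions inside the Java heap.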
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
   const NativeCallStack& stack, MEMFLAGS flag, bool all_committed) {
  assert(base_addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");
  ReservedMemoryRegion  rgn(base_addr, size, stack, flag);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  LinkedListNode<ReservedMemoryRegion>* node;
  if (reserved_rgn == NULL) {
    VirtualMemorySummary::record_reserved_memory(size, flag);
    node = _reserved_regions->add(rgn);
    if (node != NULL) {
      node->data()->set_all_committed(all_committed);
      return true;
    } else {
      return false;
    }
  } else {
    if (reserved_rgn->same_region(base_addr, size)) {
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_flag(flag);
      return true;
    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
      VirtualMemorySummary::record_reserved_memory(size, flag);
      reserved_rgn->expand_region(base_addr, size);
      reserved_rgn->set_call_stack(stack);
      return true;
    } else {
      // Overlapped reservation.
      // This can happen when the regions are thread stacks: a JNI thread
      // that exits without detaching from the VM leaks its JavaThread
      // object, and with it the tracked stack region.
      if (reserved_rgn->flag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with the new region.

        // Release the old region.
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());

        // Add the new region.
        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping the CDS archive, then maps
      // each section into the region. NMT reports CDS as a whole.
      if (reserved_rgn->flag() == mtClassShared) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) are part of the Java heap.
      if (reserved_rgn->flag() == mtJavaHeap) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      ShouldNotReachHere();
      return false;
    }
  }
}

void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
  assert(addr != NULL, "Invalid address");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion   rgn(addr, 1);
  ReservedMemoryRegion*  reserved_rgn = _reserved_regions->find(rgn);
  if (reserved_rgn != NULL) {
    assert(reserved_rgn->contain_address(addr), "Containment");
    if (reserved_rgn->flag() != flag) {
      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
      reserved_rgn->set_flag(flag);
    }
  }
}

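// Record a commit inside an already reserved region; the bookkeeping is
// delegated to the owning ReservedMemoryRegion.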
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->add_committed_region(addr, size, stack);
}

bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
  assert(reserved_rgn != NULL, "No reserved region");
  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  return reserved_rgn->remove_uncommitted_region(addr, size);
}

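// Record a virtual memory release. Releasing the whole reserved region
// removes it from the tracker; releasing one end trims the region; a
// release in the middle splits it in two, with the committed regions
// above the split moved to the new upper region.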
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != NULL, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != NULL, "Sanity check");

  ReservedMemoryRegion  rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  assert(reserved_rgn != NULL, "No reserved region");

  // Uncommit any committed regions within the released region first.
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->flag() == mtClassShared &&
      reserved_rgn->contain_region(addr, size) &&
      !reserved_rgn->same_region(addr, size)) {
    // This is an unmapped CDS region, which is part of the reserved shared
    // memory region.
    // See also the special handling in VirtualMemoryTracker::add_reserved_region.
    return true;
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());

  if (reserved_rgn->same_region(addr, size)) {
    return _reserved_regions->remove(rgn);
  } else {
    assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
    if (reserved_rgn->base() == addr ||
        reserved_rgn->end() == addr + size) {
      reserved_rgn->exclude_region(addr, size);
      return true;
    } else {
      address top = reserved_rgn->end();
      address high_base = addr + size;
      ReservedMemoryRegion high_rgn(high_base, top - high_base,
        *reserved_rgn->call_stack(), reserved_rgn->flag());

      // Use the original region for the lower part.
      reserved_rgn->exclude_region(addr, top - addr);
      LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
      if (new_rgn == NULL) {
        return false;
      } else {
        reserved_rgn->move_committed_regions(addr, *new_rgn->data());
        return true;
      }
    }
  }
}

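// Visit every tracked reserved region under ThreadCritical, so the walk
// is consistent with concurrent tracking updates. The walk stops early
// if the walker returns false.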
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != NULL, "Sanity check");
  ThreadCritical tc;
  // Check that _reserved_regions has not been deleted.
  if (_reserved_regions != NULL) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != NULL) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
        return false;
      }
      head = head->next();
    }
  }
  return true;
}

// Transition the virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
  if (to == NMT_minimal) {
    assert(from == NMT_summary || from == NMT_detail, "Just check");
    // Clean up virtual memory tracking data structures.
    ThreadCritical tc;
    // Check for a potential race with another thread calling transition.
    if (_reserved_regions != NULL) {
      delete _reserved_regions;
      _reserved_regions = NULL;
    }
  }

  return true;
}
 492