/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}
char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
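  // Worked example with hypothetical values: if addr == 0x10003000,
  // prefix_size == 0x1000 and suffix_align == 0x10000, then
  // beg_ofs == (0x10003000 + 0x1000) & 0xffff == 0x4000 and
  // beg_delta == 0x10000 - 0x4000 == 0xc000.  Returning
  // addr + beg_delta == 0x1000f000 places the end of the prefix at
  // 0x10010000, which is suffix_align-aligned as required.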

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper method: returns true if the OS placed the reservation somewhere
// other than the requested address, releasing the stray mapping as a side
// effect so the caller can retry at a different address.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored the requested address. Try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");

  // The noaccess prefix, if any, serves as the prefix portion of the
  // reservation.
  const size_t adjusted_prefix_size = noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    suffix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, suffix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, suffix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
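    //
    // Illustration (hypothetical numbers): with suffix_align == 0x10000 and
    // a first attempt whose misalignment ofs == 0x4000, extra becomes
    // MAX2(0x4000, 0xc000) == 0xc000.  Taking the larger of ofs and
    // suffix_align - ofs gives enough slack to realign whether the second
    // reservation comes back at the same address or shifted by the change
    // in request size.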
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               suffix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return to the caller, who can
      // take remedial action (like trying again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = suffix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }
  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned; release and retry with an aligned reservation.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned.
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}
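
// Usage sketch (illustrative; assumes the usual ReservedSpace accessors
// such as alignment()): a reservation can be carved into two adjacent
// spaces, e.g.
//
//   ReservedSpace rs(total_size);
//   ReservedSpace lo = rs.first_part(part_size, rs.alignment(),
//                                    /*split*/ false, /*realloc*/ false);
//   ReservedSpace hi = rs.last_part(part_size, rs.alignment());
//
// first_part() covers [base(), base() + part_size) and last_part() covers
// the remainder; passing split == true also divides the underlying OS
// mapping via os::split_reserved_memory().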


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non-zero-based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "noaccess prefix must be at least a page");

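  // Layout at this point (illustrative):
  //
  //   _base                       _base + _noaccess_prefix       _base + _size
  //     |-- protected prefix page(s) --|--------- usable heap ---------|
  //
  // After the adjustment below, _base and _size describe only the usable
  // part; decoding a narrow null then yields the old heap base, which lies
  // in the protected prefix, so the access traps and serves as an implicit
  // null check.
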
  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " SIZE_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
                                     const size_t alignment,
                                     char* requested_address) :
  ReservedSpace(heap_space_size, alignment,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }
  protect_noaccess_prefix(heap_space_size);
}

// Reserve space for a code segment.  Same as the Java heap, except that we
// mark it executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
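  //
  // The resulting three-region layout (illustrative):
  //
  //   low_boundary()    lower_high_boundary()   middle_high_boundary()   high_boundary()
  //        |--small pages--|-------large pages-------|----small pages----|
  //
  // The lower and upper fringe regions are empty when the reservation is
  // already large-page aligned at the corresponding end.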
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize() function, and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
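// Worked example (hypothetical sizes): with a 4K default page, a 4M large
// page, and low_boundary() sitting 8K below the first 4M boundary, an
// expand_by(16K) call commits the remaining 8K of the lower region with
// default pages and then commits a full 4M chunk for the middle region,
// while high() itself advances by exactly 16K.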
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif