/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

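// Trim an os-reserved region [addr, addr+len) by releasing its unused head
// and tail so that the returned base can hold prefix_size bytes followed by
// a suffix whose start (base + prefix_size) is suffix_align-aligned.
// Returns NULL (releasing nothing) if the region is too small to trim.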
char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
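  // beg_ofs is how far (addr + prefix_size) overshoots the previous
  // suffix_align boundary; beg_delta is the head to discard so that the
  // suffix starts exactly on a boundary (suffix_align is a power of two).
  // E.g., with suffix_align = 0x1000, prefix_size = 0x800 and s = 0x7400:
  // beg_ofs = 0xC00, beg_delta = 0x400, and the new base is 0x7800.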
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
     return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

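// Reserve reserve_size bytes and trim the result with align_reserved_region()
// so that the prefix/suffix alignment constraints hold.  If trimming fails,
// the raw reservation is released and NULL is returned.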
char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper: returns true iff the reservation landed somewhere other than the
// (non-NULL) requested address, releasing the stray mapping in that case.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases, but for
    // compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

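// Reserve noaccess_prefix + suffix_size bytes so that the suffix
// (base + noaccess_prefix) starts on a suffix_align boundary, falling back
// to over-reservation and manual trimming when the first attempt comes back
// misaligned.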
ReservedSpace::ReservedSpace(const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");

  // The noaccess prefix, if any, forms the prefix of the reservation.
  const size_t adjusted_prefix_size = noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    suffix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, suffix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, suffix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment.
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

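    // Extra slack (assumed rationale, per the comment above): if the new base
    // comes back unchanged, suffix_align - ofs extra bytes reach the next
    // boundary; if the base is displaced by the size increase, ofs bytes can
    // suffice.  Taking the max covers both common cases.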
    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               suffix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return to the caller, who can take
      // remedial action (like trying again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = suffix_align;
  _noaccess_prefix = noaccess_prefix;
}

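// Workhorse for the constructors above: reserve (but do not commit) a
// size-byte region with the given alignment.  'large' requests large pages,
// a non-NULL requested_address pins the placement attempt, and
// noaccess_prefix leaves room below the usable area for the protection page
// used by compressed oops implicit null checks.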
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // Failed; try to reserve regular memory below.
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints.
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned; retry.
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can take
        // remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}

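// Note that the noaccess prefix was carved out of the original reservation,
// so the full mapping to release starts _noaccess_prefix bytes below _base.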
void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

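// The noaccess prefix below is needed only when compressed oops run with a
// non-zero heap base and implicit null checks.  It must be page-aligned (so
// it can be protected) and a multiple of the heap alignment, hence lcm().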
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
                                     const size_t alignment,
                                     char* requested_address) :
  ReservedSpace(heap_space_size, alignment,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }
  protect_noaccess_prefix(heap_space_size);
}

// Reserve space for the code segment.  Same as the Java heap, except we mark
// this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache
  // and the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // Commit the initial size.
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // The caller must release via rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine whether a particular virtual space is using
   large pages.  This is done in the initialize function; only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
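
// Typical lifecycle (illustrative sketch only; actual call sites vary):
//
//   ReservedSpace rs(bytes);          // reserve an address range
//   VirtualSpace vs;
//   vs.initialize(rs, initial_bytes); // commit an initial slice
//   vs.expand_by(more_bytes);         // commit more, region by region
//   vs.shrink_by(fewer_bytes);        // uncommit from the top
//   vs.release();                     // forget the mapping...
//   rs.release();                     // ...the ReservedSpace frees it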
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // Don't commit memory if the entire space is pinned in memory.
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region, so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra- or inter-region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // Don't uncommit if the entire space is pinned in memory.
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif