/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

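// Trim an over-sized reservation [addr, addr + len) so that the suffix,
// which begins prefix_size bytes past the returned address, is
// suffix_align-aligned, releasing the unneeded memory at either end back
// to the OS.  Returns NULL (leaving the region untouched) if len leaves
// no room for proper alignment.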
char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

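  // suffix_align is a power of 2, so x & (suffix_align - 1) is x modulo
  // suffix_align.  Illustrative example (values assumed, not from any OS):
  // with addr = 0x7000, prefix_size = 0x800 and suffix_align = 0x4000,
  // beg_ofs = 0x7800 & 0x3fff = 0x3800 and beg_delta = 0x4000 - 0x3800 =
  // 0x800; the returned address 0x7800 then places the suffix at 0x8000,
  // which is 0x4000-aligned.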
  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;

  if (len < beg_delta + required_size) {
     return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

// Helper: returns true if the reservation failed or did not land at the
// requested address (releasing any mapping that did come back), so the
// caller should retry at a different address; returns false otherwise.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

ReservedSpace::ReservedSpace(const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  // Add in noaccess_prefix to prefix
  const size_t adjusted_prefix_size = noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    suffix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, suffix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust address
    assert(requested_address != NULL, "huge noaccess prefix?");
    addr = os::attempt_reserve_memory_at(size, requested_address);
    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // OS ignored requested address. Try different address.
      addr = NULL;
    }
  } else {
    addr = os::reserve_memory(size, NULL, suffix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

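    // If the new reservation comes back at the same base, suffix_align - ofs
    // bytes of slack are enough to realign it; if it comes back shifted by
    // the size increase, ofs bytes are.  Taking the MAX covers both of the
    // common cases described above.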
    const size_t extra = MAX2(ofs, suffix_align - ofs);
    addr = reserve_and_align(size + extra, adjusted_prefix_size, suffix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               suffix_align, suffix_size, suffix_align);
    }

    if (requested_address != 0 &&
        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
      // As a result of the alignment constraints, the allocated addr differs
      // from the requested address. Return to the caller, who can
      // take remedial action (like trying again without a requested address).
      assert(_base == NULL, "should be");
      return;
    }
  }

  _base = addr;
  _size = size;
  _alignment = suffix_align;
  _noaccess_prefix = noaccess_prefix;
}


void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over-mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve size large enough to do manual alignment and
      // increase size to a multiple of the desired alignment
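      // (os::reserve_memory_aligned reserves extra space as needed and
      // trims or retries so that the base it returns satisfies the
      // requested alignment.)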
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can
        // take remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " SIZE_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}
 463 
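// The noaccess prefix, when used, is lcm(page size, alignment) bytes:
// a page-size multiple so it can be protected via os::protect_memory, and
// an alignment multiple so the heap proper remains aligned after the
// prefix is skipped over.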
ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t heap_space_size,
                                     const size_t alignment,
                                     char* requested_address) :
  ReservedSpace(heap_space_size, alignment,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }
  protect_noaccess_prefix(heap_space_size);
}

// Reserve space for the code segment.  Same as the Java heap, except we
// mark this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}



VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying reserved memory; that is the
  // caller's responsibility, via ReservedSpace::release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done at initialization time and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
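// Region layout used by expand_by() and shrink_by() (addresses increase to
// the right; names match the accessors in this class):
//
//   low_boundary()   lower_high_boundary()  middle_high_boundary()  high_boundary()
//        |--- lower ---|------- middle --------|-------- upper --------|
//        (small pages)      (large pages)           (small pages)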
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will
  // be LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
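  //
  // Illustrative example (numbers assumed): with middle_alignment() = 4M
  // and a 4M-aligned base, an unaligned_middle_new_high of base + 5M is
  // rounded up to base + 8M, so the whole large page containing the new
  // high gets committed.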
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif