/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_virtualspace.cpp.incl"


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

char *
ReservedSpace::align_reserved_region(char* addr, const size_t len,
                                     const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align)
{
  assert(addr != NULL, "sanity");
  const size_t required_size = prefix_size + suffix_size;
  assert(len >= required_size, "len too small");

  const size_t s = size_t(addr);
  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
  const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
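  // Illustrative example (not tied to any particular platform): with
  // addr == 0x10001000, prefix_size == 0x1000 and suffix_align == 0x10000,
  // beg_ofs == 0x2000 and beg_delta == 0xe000, i.e. 0xe000 bytes must be
  // skipped at the start of the region so that the suffix boundary
  // (addr + beg_delta + prefix_size) is aligned to suffix_align.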

  if (len < beg_delta + required_size) {
    return NULL; // Cannot do proper alignment.
  }
  const size_t end_delta = len - (beg_delta + required_size);

  if (beg_delta != 0) {
    os::release_memory(addr, beg_delta);
  }

  if (end_delta != 0) {
    char* release_addr = (char*) (s + beg_delta + required_size);
    os::release_memory(release_addr, end_delta);
  }

  return (char*) (s + beg_delta);
}

char* ReservedSpace::reserve_and_align(const size_t reserve_size,
                                       const size_t prefix_size,
                                       const size_t prefix_align,
                                       const size_t suffix_size,
                                       const size_t suffix_align)
{
  assert(reserve_size > prefix_size + suffix_size, "should not be here");

  char* raw_addr = os::reserve_memory(reserve_size, NULL, prefix_align);
  if (raw_addr == NULL) return NULL;

  char* result = align_reserved_region(raw_addr, reserve_size, prefix_size,
                                       prefix_align, suffix_size,
                                       suffix_align);
  if (result == NULL && !os::release_memory(raw_addr, reserve_size)) {
    fatal("os::release_memory failed");
  }

#ifdef ASSERT
  if (result != NULL) {
    const size_t raw = size_t(raw_addr);
    const size_t res = size_t(result);
    assert(res >= raw, "alignment decreased start addr");
    assert(res + prefix_size + suffix_size <= raw + reserve_size,
           "alignment increased end addr");
    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
           "bad alignment of suffix");
  }
#endif

  return result;
}

ReservedSpace::ReservedSpace(const size_t prefix_size,
                             const size_t prefix_align,
                             const size_t suffix_size,
                             const size_t suffix_align,
                             char* requested_address,
                             const size_t noaccess_prefix)
{
  assert(prefix_size != 0, "sanity");
  assert(prefix_align != 0, "sanity");
  assert(suffix_size != 0, "sanity");
  assert(suffix_align != 0, "sanity");
  assert((prefix_size & (prefix_align - 1)) == 0,
    "prefix_size not divisible by prefix_align");
  assert((suffix_size & (suffix_align - 1)) == 0,
    "suffix_size not divisible by suffix_align");
  assert((suffix_align & (prefix_align - 1)) == 0,
    "suffix_align not divisible by prefix_align");

  // Add in noaccess_prefix to prefix_size.
  const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
  const size_t size = adjusted_prefix_size + suffix_size;

  // On systems where the entire region has to be reserved and committed up
  // front, the compound alignment normally done by this method is unnecessary.
  const bool try_reserve_special = UseLargePages &&
    prefix_align == os::large_page_size();
  if (!os::can_commit_large_page_memory() && try_reserve_special) {
    initialize(size, prefix_align, true, requested_address, noaccess_prefix,
               false);
    return;
  }

  _base = NULL;
  _size = 0;
  _alignment = 0;
  _special = false;
  _noaccess_prefix = 0;
  _executable = false;

  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == prefix_align, "noaccess prefix wrong");

  // Optimistically try to reserve the exact size needed.
  char* addr;
  if (requested_address != 0) {
    addr = os::attempt_reserve_memory_at(size,
                                         requested_address-noaccess_prefix);
  } else {
    addr = os::reserve_memory(size, NULL, prefix_align);
  }
  if (addr == NULL) return;

  // Check whether the result has the needed alignment (unlikely unless
  // prefix_align == suffix_align).
  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
  if (ofs != 0) {
    // Wrong alignment.  Release, allocate more space and do manual alignment.
    //
    // On most operating systems, another allocation with a somewhat larger size
    // will return an address "close to" that of the previous allocation.  The
    // result is often the same address (if the kernel hands out virtual
    // addresses from low to high), or an address that is offset by the increase
    // in size.  Exploit that to minimize the amount of extra space requested.
    if (!os::release_memory(addr, size)) {
      fatal("os::release_memory failed");
    }

    const size_t extra = MAX2(ofs, suffix_align - ofs);
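    // Illustrative reasoning for the MAX2: if the retry comes back at the same
    // address, (suffix_align - ofs) extra bytes are enough to slide the start
    // forward to the next aligned position; if the kernel instead shifts the
    // mapping down by the size increase, ofs extra bytes put the aligned
    // position right at the new base.  The larger of the two covers either
    // outcome; this is a best-effort heuristic, and the fallback below with
    // size + suffix_align handles the remaining cases.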
    addr = reserve_and_align(size + extra, adjusted_prefix_size, prefix_align,
                             suffix_size, suffix_align);
    if (addr == NULL) {
      // Try an even larger region.  If this fails, address space is exhausted.
      addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                               prefix_align, suffix_size, suffix_align);
    }
  }

  _base = addr;
  _size = size;
  _alignment = prefix_align;
  _noaccess_prefix = noaccess_prefix;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
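  // When 'special' is set the whole region is reserved, committed and pinned
  // up front, so later VirtualSpace::expand_by()/shrink_by() calls only move
  // the soft high-water mark (see the special() checks in those methods).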
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      // Check alignment constraints
      if (alignment > 0) {
        assert((uintptr_t) base % alignment == 0,
               "Large pages returned a non-aligned address");
      }
      _special = true;
    } else {
      // failed; try to reserve regular memory below
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size,
                                           requested_address-noaccess_prefix);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if (alignment > 0 && ((size_t)base & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Reserve a size large enough to do manual alignment and
      // increase the size to a multiple of the desired alignment.
      size = align_size_up(size, alignment);
      size_t extra_size = size + alignment;
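      // E.g. (illustrative), with size == 4M and alignment == 64K we reserve
      // 4M + 64K, so some 64K-aligned base of 4M bytes is guaranteed to fit
      // inside the larger mapping.  Note that between releasing the oversized
      // mapping and re-reserving at the aligned base another thread may grab
      // the range, in which case os::reserve_memory() returns NULL and the
      // loop below retries.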
      do {
        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
        if (extra_base == NULL) return;
        // Do manual alignment
        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
        assert(base >= extra_base, "just checking");
        // Re-reserve the region at the aligned base address.
        os::release_memory(extra_base, extra_size);
        base = os::reserve_memory(size, base);
      } while (base == NULL);
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}
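
// Taken together (illustrative usage, not specific to any one caller):
// first_part(n) and last_part(n) partition one reservation into
// [base(), base() + n) and [base() + n, base() + size()), e.g. so two
// generations can share a single contiguous mapping.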


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  // If there is no noaccess prefix, there is nothing to protect.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }

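  // Shift the usable window up past the protected page(s): callers see a
  // base/size that exclude the noaccess prefix, while release() later adds
  // the prefix back in so the whole mapping is freed.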
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
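  // The prefix sits just below the usable heap, so decoding a compressed
  // null oop lands on a protected page and faults; that is what allows the
  // implicit null checks referenced above to work without explicit tests.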
  protect_noaccess_prefix(size);
}

ReservedHeapSpace::ReservedHeapSpace(const size_t prefix_size,
                                     const size_t prefix_align,
                                     const size_t suffix_size,
                                     const size_t suffix_align,
                                     char* requested_address) :
  ReservedSpace(prefix_size, prefix_align, suffix_size, suffix_align,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), prefix_align) : 0) {
  protect_noaccess_prefix(prefix_size+suffix_size);
}

// Reserve space for the code segment.  Same as the Java heap, except we mark
// this as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();
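
  // Sketch of the resulting layout (the middle region only exists when the
  // space is big enough for large pages; otherwise all three alignments are
  // the default page size and the lower/upper regions are empty):
  //
  //   low_boundary()                                          high_boundary()
  //   |- lower (small pages) -|- middle (large pages) -|- upper (small pages) -|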

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying memory: the VirtualSpace never
  // reserved it.  The caller must release it via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function, and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }
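
  // Worked example (illustrative): if high() already sits in the middle
  // region and the request also ends there, aligned_lower_new_high ==
  // lower_high() and aligned_upper_new_high rounds to a value <= upper_high(),
  // so lower_needs == upper_needs == 0 and only the middle region is grown.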

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("os::commit_memory failed"));
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());
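
  // These clamps mirror the MIN2() ones in expand_by(): shrinking is applied
  // from the upper region downward, and a region whose high is already at or
  // below the new high is left untouched.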

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: %ld", committed_size());
  tty->print_cr(" - reserved:  %ld", reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif