/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif


// ReservedSpace
ReservedSpace::ReservedSpace(size_t size) {
  initialize(size, 0, false, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method: returns true if the OS failed to reserve memory at the
// requested address, releasing any reservation that landed elsewhere.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Release the memory so the caller can
    // try a different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
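    // (Illustrative: if the caller requested the heap at 0x20000000 with a
    //  4K noaccess prefix, the underlying reservation is requested at
    //  0x20000000 - 0x1000, so the usable heap still begins at 0x20000000
    //  once the prefix is protected and _base is bumped past it.)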
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints
      assert((uintptr_t) base % alignment == 0,
             "Large pages returned a non-aligned address");
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return to the caller, who can take
        // remedial action (like trying again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert((_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                     (Universe::narrow_oop_base() != NULL) &&
                                     Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non-zero-based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

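  // Layout (a sketch): on entry _base is the raw reservation base; on return
  // it points just past the protected prefix and _size excludes it.
  //
  //   _base (entry)            _base (return)
  //        |-- noaccess_prefix --|--------- usable space ---------|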
  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on Windows).
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the Java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for the code segment.  Same as the Java heap, except that
// we mark this space as executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}
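// (Illustrative note, not a caller in this file: the code cache reserves its
// backing store this way and then commits it incrementally through a
// VirtualSpace.)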

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are the codecache
  // and the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not already so aligned.
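  //
  // The reserved range is thus split into three consecutive regions
  // (a sketch; the lower and upper regions are empty when the boundaries
  // happen to be large-page aligned already):
  //
  //   low_boundary()  lower_high_boundary()  middle_high_boundary()  high_boundary()
  //        |--- lower ---|-------- middle --------|--- upper ---|
  //         small pages        large pages          small pages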
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}
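
// Usage sketch (illustrative; actual callers live elsewhere in the VM):
//
//   ReservedSpace rs(reserved_bytes);          // reserve the address range
//   VirtualSpace vs;
//   if (vs.initialize(rs, committed_bytes)) {  // commit an initial portion
//     // use [vs.low(), vs.high()); grow/shrink via expand_by()/shrink_by()
//   }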


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release the underlying memory: the VirtualSpace never
  // reserved it. The owner of the ReservedSpace must release it via
  // rs.release().
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}
/*
   First we need to determine whether a particular virtual space is using
   large pages.  This is done in the initialize function, and only virtual
   spaces that are larger than LargePageSizeInBytes use large pages.  Once
   we have determined this, all expand_by and shrink_by calls must grow and
   shrink by large-page-size chunks.  If a particular request falls within
   the current large page, the calls to commit and uncommit memory can be
   ignored.  In the case that the low and high boundaries of this space are
   not large page aligned, the pages leading up to the first large page
   address and the pages after the last large page address must be
   allocated with default pages.
*/
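// Worked example (illustrative numbers): suppose the middle alignment is 4M
// and high() currently sits at a 4M boundary B.  expand_by(512*K) rounds the
// middle region's new high up to B + 4M, so a full 4M chunk is committed at
// once; a later expand_by(512*K) still rounds to B + 4M, middle_needs
// computes to zero, and no further commit call is made.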
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  The lower and
  // upper alignments will always be the default page size; the middle
  // alignment will be LargePageSizeInBytes if the actual size of the
  // virtual space is in fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region, so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell whether
  // it is an intra- or inter-region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    int vm_ps = os::vm_page_size();
    for (char* curr = previous_high;
         curr < unaligned_new_high;
         curr += vm_ps) {
      // Note the use of a write here; originally we tried just a read, but
      // since the value read was unused, the optimizer removed the read.
      // If we ever have a concurrent touchahead thread, we'll want to use
      // a read, to avoid the potential of overwriting data (if a mutator
      // thread beats the touchahead thread to a page).  There are various
      // ways of making sure this read is not optimized away: for example,
      // generating the code for a read procedure at runtime.
      *curr = 0;
    }
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print() {
  tty->print   ("Virtual space:");
  if (special()) tty->print(" (pinned in memory)");
  tty->cr();
  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

#endif