/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_bsd.inline.hpp"
#endif

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size, bool prefer_large_pages) {
  // Want to use large pages where possible and pad with small pages.
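  // For example (hypothetical sizes: 2M large pages, 64K allocation
  // granularity): a 5M request with prefer_large_pages rounds the size up
  // to 6M and reserves with 2M alignment, whereas without
  // prefer_large_pages the 5M request keeps the 64K alignment and the tail
  // beyond the last whole large page is backed by small pages.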
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && prefer_large_pages) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_size_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  initialize(size, alignment, large_pages, NULL, 0, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address,
                             const size_t noaccess_prefix) {
  initialize(size+noaccess_prefix, alignment, large, requested_address,
             noaccess_prefix, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, 0, executable);
}

// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases, but
    // for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

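// Reserve 'size' bytes with the given alignment. If the first attempt comes
// back unaligned, the memory is released and re-reserved with
// os::reserve_memory_aligned(). On failure _base stays NULL, which callers
// can detect via is_reserved().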
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               const size_t noaccess_prefix,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == alignment, "noaccess prefix wrong");

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (requested_address != 0) {
    requested_address -= noaccess_prefix; // adjust requested address
    assert(requested_address != NULL, "huge noaccess prefix?");
  }

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = noaccess_prefix;

  // Assert that if noaccess_prefix is used, it is the same as alignment.
  assert(noaccess_prefix == 0 ||
         noaccess_prefix == _alignment, "noaccess prefix wrong");

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


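// Rounding helpers. For example, with a hypothetical 4K page size and 64K
// allocation granularity: page_align_size_up(5000) == 8192,
// page_align_size_down(5000) == 4096, and
// allocation_align_size_up(5000) == 65536.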
size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _special = false;
    _executable = false;
  }
}

void ReservedSpace::protect_noaccess_prefix(const size_t size) {
  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
                                      (Universe::narrow_oop_base() != NULL) &&
                                      Universe::narrow_oop_use_implicit_null_checks()),
         "noaccess_prefix should be used only with non zero based compressed oops");

  // If there is no noaccess prefix, return.
  if (_noaccess_prefix == 0) return;

  assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
         "must be at least page size big");

  // Protect memory at the base of the allocated region.
  // If special, the page was committed (only matters on windows)
  if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE,
                          _special)) {
    fatal("cannot protect protection page");
  }
  if (PrintCompressedOopsMode) {
    tty->cr();
    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
  }

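  // Resulting layout (sketch; the prefix equals the reservation alignment):
  //
  //   reserved base           adjusted _base
  //   v                       v
  //   +-----------------------+------------------------------------+
  //   | noaccess (PROT_NONE)  | usable heap, _size bytes           |
  //   +-----------------------+------------------------------------+
  //
  // Decoding a narrow oop of 0 yields the narrow oop base, which points
  // into the protected prefix, so the access faults and can serve as an
  // implicit null check.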
  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert((size == _size) && ((uintptr_t)_base % _alignment == 0),
         "must be exactly of required size and alignment");
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment,
                                     bool large, char* requested_address) :
  ReservedSpace(size, alignment, large,
                requested_address,
                (UseCompressedOops && (Universe::narrow_oop_base() != NULL) &&
                 Universe::narrow_oop_use_implicit_null_checks()) ?
                  lcm(os::vm_page_size(), alignment) : 0) {
  if (base() != NULL) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }

  // Only reserved space for the java heap should have a noaccess_prefix
  // if using compressed oops.
  protect_noaccess_prefix(size);
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
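  // page_size_for_region_unaligned(rs.size(), 1) picks the largest supported
  // page size that fits into the reservation at least once, so a reservation
  // of at least one large page commits in large-page chunks.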
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function, and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default (small) pages.
*/
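// Region sketch (see the comment above; a reservation whose ends are not
// large-page aligned):
//
//   low_boundary()  lower_high_boundary()  middle_high_boundary()  high_boundary()
//   |     lower      |        middle        |        upper          |
//   |  small pages   |     large pages      |     small pages       |
//
// lower_high(), middle_high() and upper_high() track how far each region
// has been committed so far.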
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will
  // be LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By taking the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// at which point that particular page can be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)
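// The do/while (false) wrapper makes the macro expand to a single statement,
// so test_log(...) can sit safely in unbraced if/else bodies.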

class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     NULL,          // requested_address
                     0);            // noaccess_prefix

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));


class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch(mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify the behavior when we force VirtualSpace to
    // disable large pages, and compare it against the Reserve and Commit
    // granularity modes.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif