1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/compressedOops.hpp"
  30 #include "oops/markWord.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/os.inline.hpp"
  33 #include "services/memTracker.hpp"
  34 #include "utilities/align.hpp"
  35 #include "utilities/powerOfTwo.hpp"
  36 
  37 // ReservedSpace
  38 
  39 // Dummy constructor
  40 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  41     _alignment(0), _special(false), _fd(-1), _executable(false) {
  42 }
  43 
  44 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd(-1) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
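
// Illustrative sketch (not from the original sources) of how the constructor
// above picks page size and alignment. Numbers are hypothetical and assume a
// 4K base page size, 2M large pages and a 64K allocation granularity:
//
//   ReservedSpace rs(5*M, 2*M);
//   // page_size  = 2M (the preferred size), so large_pages == true
//   // alignment  = MAX2(2M, 64K) = 2M
//   // size       = align_up(5*M, 2*M) = 6M
//   // -> initialize(6*M, 2*M, /*large=*/ true, NULL, /*executable=*/ false)
//
// With preferred_page_size == 0 the page size comes from
// os::page_size_for_region_unaligned() and the alignment stays at the
// allocation granularity, so no memory is wasted on large-page rounding.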
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd(-1) {
  66   initialize(size, alignment, large, requested_address, false);
  67 }
  68 
  69 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  70                              bool special, bool executable) : _fd(-1) {
  71   assert((size % os::vm_allocation_granularity()) == 0,
  72          "size not allocation aligned");
  73   _base = base;
  74   _size = size;
  75   _alignment = alignment;
  76   _noaccess_prefix = 0;
  77   _special = special;
  78   _executable = executable;
  79 }
  80 
  81 // Helper method
  82 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  83   if (is_file_mapped) {
  84     if (!os::unmap_memory(base, size)) {
  85       fatal("os::unmap_memory failed");
  86     }
  87   } else if (!os::release_memory(base, size)) {
  88     fatal("os::release_memory failed");
  89   }
  90 }
  91 
  92 // Helper method.
  93 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  94                                            const size_t size, bool special, bool is_file_mapped = false)
  95 {
  96   if (base == requested_address || requested_address == NULL)
  97     return false; // did not fail
  98 
  99   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
 102     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 103     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 104     // OS ignored requested address. Try different address.
 105     if (special) {
 106       if (!os::release_memory_special(base, size)) {
 107         fatal("os::release_memory_special failed");
 108       }
 109     } else {
 110       unmap_or_release_memory(base, size, is_file_mapped);
 111     }
 112   }
 113   return true;
 114 }
 115 
 116 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 117                                char* requested_address,
 118                                bool executable) {
 119   const size_t granularity = os::vm_allocation_granularity();
 120   assert((size & (granularity - 1)) == 0,
 121          "size not aligned to os::vm_allocation_granularity()");
 122   assert((alignment & (granularity - 1)) == 0,
 123          "alignment not aligned to os::vm_allocation_granularity()");
 124   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 125          "not a power of 2");
 126 
 127   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 128 
 129   _base = NULL;
 130   _size = 0;
 131   _special = false;
 132   _executable = executable;
 133   _alignment = 0;
 134   _noaccess_prefix = 0;
 135   if (size == 0) {
 136     return;
 137   }
 138 
 139   char* base = NULL;
  bool special = large && !os::can_commit_large_page_memory();
  if (special && _fd != -1) {
    // When the space is backed by a file, large page usage is up to the file
    // system of that file, so do not reserve pinned large pages here.
    special = false;
  }
 142 
 143   if (special) {
 144 
 145     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 146 
 147     if (base != NULL) {
 148       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 149         // OS ignored requested address. Try different address.
 150         return;
 151       }
 152       // Check alignment constraints.
 153       assert((uintptr_t) base % alignment == 0,
 154              "Large pages returned a non-aligned address, base: "
 155              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 156              p2i(base), alignment);
 157       _special = true;
 158     } else {
 159       // failed; try to reserve regular memory below
 160       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 161                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 162         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 163       }
 164     }
 165   }
 166 
 167   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
 169     // When reserving a large address range, most OSes seem to align to at
 170     // least 64K.
 171 
 172     // If the memory was requested at a particular address, use
 173     // os::attempt_reserve_memory_at() to avoid over mapping something
 174     // important.  If available space is not detected, return NULL.
 175 
 176     if (requested_address != 0) {
 177       base = os::attempt_reserve_memory_at(size, requested_address, _fd);
 178       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd != -1)) {
 179         // OS ignored requested address. Try different address.
 180         base = NULL;
 181       }
 182     } else {
 183       base = os::reserve_memory(size, NULL, alignment, _fd);
 184     }
 185 
 186     if (base == NULL) return;
 187 
 188     // Check alignment constraints
 189     if ((((size_t)base) & (alignment - 1)) != 0) {
 190       // Base not aligned, retry
 191       unmap_or_release_memory(base, size, _fd != -1 /*is_file_mapped*/);
 192 
 193       // Make sure that size is aligned
 194       size = align_up(size, alignment);
 195       base = os::reserve_memory_aligned(size, alignment, _fd);
 196 
 197       if (requested_address != 0 &&
 198           failed_to_reserve_as_requested(base, requested_address, size, false, _fd != -1)) {
 199         // As a result of the alignment constraints, the allocated base differs
 200         // from the requested address. Return back to the caller who can
 201         // take remedial action (like try again without a requested address).
 202         assert(_base == NULL, "should be");
 203         return;
 204       }
 205     }
 206   }
 207   // Done
 208   _base = base;
 209   _size = size;
 210   _alignment = alignment;
 211   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
 212   if (_fd != -1) {
 213     _special = true;
 214   }
 215 }
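
// Sketch of the alignment retry above, with hypothetical numbers (4K pages,
// 64K granularity): initialize(1*M, 256*K, false, NULL, false) first asks
// os::reserve_memory() for 1M. If the OS hands back e.g. 0x7f0000010000,
// which is 64K but not 256K aligned, that mapping is released and
// os::reserve_memory_aligned() is used instead, which typically over-reserves
// by the alignment and trims both ends so the surviving 1M range starts on a
// 256K boundary. Only then are _base, _size and _alignment published.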
 216 
 217 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, bool split) {
 218   assert(partition_size <= size(), "partition failed");
 219   if (split && partition_size > 0 && partition_size < size()) {
 220     os::split_reserved_memory(base(), size(), partition_size);
 221   }
 222   ReservedSpace result(base(), partition_size, alignment, special(),
 223                        executable());
 224   return result;
 225 }
 226 
 227 
 228 ReservedSpace
 229 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 230   assert(partition_size <= size(), "partition failed");
 231   ReservedSpace result(base() + partition_size, size() - partition_size,
 232                        alignment, special(), executable());
 233   return result;
 234 }
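
// Example (illustrative only): carving one reservation into two adjacent
// spaces, roughly as a generational heap would for its generations.
//
//   ReservedSpace whole(64*M, 64*K, /*large=*/ false, (char*)NULL);
//   ReservedSpace young = whole.first_part(16*M, 64*K, false);
//   ReservedSpace old   = whole.last_part(16*M, 64*K);
//   // young covers [base, base+16M), old covers [base+16M, base+64M).
//
// Both parts alias the original mapping; releasing the underlying memory is
// still the responsibility of whoever owns the original reservation.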
 235 
 236 
 237 size_t ReservedSpace::page_align_size_up(size_t size) {
 238   return align_up(size, os::vm_page_size());
 239 }
 240 
 241 
 242 size_t ReservedSpace::page_align_size_down(size_t size) {
 243   return align_down(size, os::vm_page_size());
 244 }
 245 
 246 
 247 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 248   return align_up(size, os::vm_allocation_granularity());
 249 }
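
// Worked example (hypothetical 4K page size and 64K allocation granularity):
//
//   page_align_size_up(5000)       == 8192    // next 4K boundary
//   page_align_size_down(5000)     == 4096
//   allocation_align_size_up(5000) == 65536   // next 64K boundary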
 250 
 251 
 252 void ReservedSpace::release() {
 253   if (is_reserved()) {
 254     char *real_base = _base - _noaccess_prefix;
 255     const size_t real_size = _size + _noaccess_prefix;
 256     if (special()) {
 257       if (_fd != -1) {
 258         os::unmap_memory(real_base, real_size);
 259       } else {
 260         os::release_memory_special(real_base, real_size);
 261       }
    } else {
 263       os::release_memory(real_base, real_size);
 264     }
 265     _base = NULL;
 266     _size = 0;
 267     _noaccess_prefix = 0;
 268     _alignment = 0;
 269     _special = false;
 270     _executable = false;
 271   }
 272 }
 273 
 274 static size_t noaccess_prefix_size(size_t alignment) {
 275   return lcm(os::vm_page_size(), alignment);
 276 }
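
// The prefix must be page aligned (so it can be protected) and a multiple of
// the heap alignment (so the usable base stays aligned), hence the lcm.
// Hypothetical numbers:
//
//   vm_page_size = 4K, alignment = 64K  ->  noaccess_prefix = 64K
//   vm_page_size = 4K, alignment = 2M   ->  noaccess_prefix = 2M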
 277 
 278 void ReservedHeapSpace::establish_noaccess_prefix() {
 279   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 280   _noaccess_prefix = noaccess_prefix_size(_alignment);
 281 
 282   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 283     if (true
 284         WIN64_ONLY(&& !UseLargePages)
 285         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 286       // Protect memory at the base of the allocated region.
 287       // If special, the page was committed (only matters on windows)
 288       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 289         fatal("cannot protect protection page");
 290       }
 291       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 292                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 293                                  p2i(_base),
 294                                  _noaccess_prefix);
 295       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
 296     } else {
 297       CompressedOops::set_use_implicit_null_checks(false);
 298     }
 299   }
 300 
 301   _base += _noaccess_prefix;
 302   _size -= _noaccess_prefix;
 303   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 304 }
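
// Why a protected prefix enables implicit null checks (conceptual sketch,
// addresses are hypothetical): with a heap-based compressed oop encoding,
// decoding a narrow oop of 0 yields the heap base rather than NULL, e.g.
// with base = 0x1000000000 and shift = 3:
//
//   oop decoded = (oop)(base + ((uint64_t)narrow << 3));  // narrow == 0 -> 0x1000000000
//
// Because [base, base + _noaccess_prefix) is protected above, touching that
// decoded "null" faults, and the signal handler turns the fault into the
// expected NullPointerException, so compiled code can omit explicit null tests.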
 305 
 306 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 307 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 308 // might still fulfill the wishes of the caller.
 309 // Assures the memory is aligned to 'alignment'.
 310 // NOTE: If ReservedHeapSpace already points to some reserved memory this is freed, first.
 311 void ReservedHeapSpace::try_reserve_heap(size_t size,
 312                                          size_t alignment,
 313                                          bool large,
 314                                          char* requested_address) {
 315   if (_base != NULL) {
 316     // We tried before, but we didn't like the address delivered.
 317     release();
 318   }
 319 
 320   char* base = NULL;
  bool special = large && !os::can_commit_large_page_memory();
  if (special && _fd != -1) {
    // When the heap is backed by a file, large page usage is up to the file
    // system of that file, so do not reserve pinned large pages here.
    special = false;
  }
 323 
 324   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 325                              " heap of size " SIZE_FORMAT_HEX,
 326                              p2i(requested_address),
 327                              size);
 328 
 329   if (special) {
 330     base = os::reserve_memory_special(size, alignment, requested_address, false);
 331 
 332     if (base != NULL) {
 333       // Check alignment constraints.
 334       assert((uintptr_t) base % alignment == 0,
 335              "Large pages returned a non-aligned address, base: "
 336              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 337              p2i(base), alignment);
 338       _special = true;
 339     }
 340   }
 341 
 342   if (base == NULL) {
 343     // Failed; try to reserve regular memory below
 344     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 345                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 346       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 347     }
 348 
    // Optimistically assume that the OS returns an aligned base pointer.
 350     // When reserving a large address range, most OSes seem to align to at
 351     // least 64K.
 352 
 353     // If the memory was requested at a particular address, use
 354     // os::attempt_reserve_memory_at() to avoid over mapping something
 355     // important.  If available space is not detected, return NULL.
 356 
 357     if (requested_address != 0) {
 358       base = os::attempt_reserve_memory_at(size, requested_address, _fd);
 359     } else {
 360       base = os::reserve_memory(size, NULL, alignment, _fd);
 361     }
 362   }
 363   if (base == NULL) { return; }
 364 
 365   // Done
 366   _base = base;
 367   _size = size;
 368   _alignment = alignment;
 369 
 370   // If heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true
 371   if (_fd != -1) {
 372     _special = true;
 373   }
 374 
 375   // Check alignment constraints
 376   if ((((size_t)base) & (alignment - 1)) != 0) {
 377     // Base not aligned, retry.
 378     release();
 379   }
 380 }
 381 
 382 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 383                                           char *lowest_start,
 384                                           size_t attach_point_alignment,
 385                                           char *aligned_heap_base_min_address,
 386                                           char *upper_bound,
 387                                           size_t size,
 388                                           size_t alignment,
 389                                           bool large) {
 390   const size_t attach_range = highest_start - lowest_start;
 391   // Cap num_attempts at possible number.
 392   // At least one is possible even for 0 sized attach range.
 393   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 394   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 395 
 396   const size_t stepsize = (attach_range == 0) ? // Only one try.
 397     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 398 
 399   // Try attach points from top to bottom.
 400   char* attach_point = highest_start;
 401   while (attach_point >= lowest_start  &&
 402          attach_point <= highest_start &&  // Avoid wrap around.
 403          ((_base == NULL) ||
 404           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 405     try_reserve_heap(size, alignment, large, attach_point);
 406     attach_point -= stepsize;
 407   }
 408 }
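
// Worked example with hypothetical inputs: lowest_start = 4G, highest_start =
// 28G, attach_point_alignment = 4G and HeapSearchSteps = 3:
//
//   attach_range          = 24G
//   num_attempts_possible = 7, num_attempts_to_try = 3
//   stepsize              = align_up(24G / 3, 4G) = 8G
//   attach points tried (top down): 28G, 20G, 12G, 4G
//
// The loop stops as soon as a try produces a base inside
// [aligned_heap_base_min_address, upper_bound - size].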
 409 
 410 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 411 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 412 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 413 
 414 // Helper for heap allocation. Returns an array with addresses
 415 // (OS-specific) which are suited for disjoint base mode. Array is
 416 // NULL terminated.
 417 static char** get_attach_addresses_for_disjoint_mode() {
 418   static uint64_t addresses[] = {
 419      2 * SIZE_32G,
 420      3 * SIZE_32G,
 421      4 * SIZE_32G,
 422      8 * SIZE_32G,
 423     10 * SIZE_32G,
 424      1 * SIZE_64K * SIZE_32G,
 425      2 * SIZE_64K * SIZE_32G,
 426      3 * SIZE_64K * SIZE_32G,
 427      4 * SIZE_64K * SIZE_32G,
 428     16 * SIZE_64K * SIZE_32G,
 429     32 * SIZE_64K * SIZE_32G,
 430     34 * SIZE_64K * SIZE_32G,
 431     0
 432   };
 433 
 434   // Sort out addresses smaller than HeapBaseMinAddress. This assumes
 435   // the array is sorted.
 436   uint i = 0;
 437   while (addresses[i] != 0 &&
 438          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 439     i++;
 440   }
 441   uint start = i;
 442 
 443   // Avoid more steps than requested.
 444   i = 0;
 445   while (addresses[start+i] != 0) {
 446     if (i == HeapSearchSteps) {
 447       addresses[start+i] = 0;
 448       break;
 449     }
 450     i++;
 451   }
 452 
 453   return (char**) &addresses[start];
 454 }
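
// "Disjoint base" means the heap base is a multiple of OopEncodingHeapMax
// (32G with the default 3-bit shift), so the base bits and the shifted narrow
// oop bits occupy disjoint bit ranges. Conceptual sketch of why that helps:
//
//   // base = 2 * 32G; shifted narrow oops are always < 32G
//   oop decoded = (oop)(base | ((uint64_t)narrow << LogMinObjAlignmentInBytes));
//
// i.e. decoding can set the high base bits with an OR (or a bit insert)
// instead of a full add, which is cheaper on some platforms.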
 455 
 456 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 457   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 458             "can not allocate compressed oop heap for this size");
 459   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 460 
 461   const size_t granularity = os::vm_allocation_granularity();
 462   assert((size & (granularity - 1)) == 0,
 463          "size not aligned to os::vm_allocation_granularity()");
 464   assert((alignment & (granularity - 1)) == 0,
 465          "alignment not aligned to os::vm_allocation_granularity()");
 466   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 467          "not a power of 2");
 468 
 469   // The necessary attach point alignment for generated wish addresses.
 470   // This is needed to increase the chance of attaching for mmap and shmat.
 471   const size_t os_attach_point_alignment =
 472     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 473     NOT_AIX(os::vm_allocation_granularity());
 474   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 475 
 476   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 477   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 478     noaccess_prefix_size(alignment) : 0;
 479 
 480   // Attempt to alloc at user-given address.
 481   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 482     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 483     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 484       release();
 485     }
 486   }
 487 
 488   // Keep heap at HeapBaseMinAddress.
 489   if (_base == NULL) {
 490 
 491     // Try to allocate the heap at addresses that allow efficient oop compression.
 492     // Different schemes are tried, in order of decreasing optimization potential.
 493     //
 494     // For this, try_reserve_heap() is called with the desired heap base addresses.
 495     // A call into the os layer to allocate at a given address can return memory
 496     // at a different address than requested.  Still, this might be memory at a useful
 497     // address. try_reserve_heap() always returns this allocated memory, as only here
 498     // the criteria for a good heap are checked.
 499 
 500     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 501     // Give it several tries from top of range to bottom.
 502     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 503 
      // Calc the address range within which we try to attach (range of possible start addresses).
 505       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 506       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 507       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 508                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 509     }
 510 
 511     // zerobased: Attempt to allocate in the lower 32G.
 512     // But leave room for the compressed class pointers, which is allocated above
 513     // the heap.
 514     char *zerobased_max = (char *)OopEncodingHeapMax;
 515     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 516     // For small heaps, save some space for compressed class pointer
 517     // space so it can be decoded with no base.
 518     if (UseCompressedClassPointers && !UseSharedSpaces &&
 519         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 520         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 521       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 522     }
 523 
 524     // Give it several tries from top of range to bottom.
 525     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
 526         ((_base == NULL) ||                        // No previous try succeeded.
 527          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 528 
      // Calc the address range within which we try to attach (range of possible start addresses).
 530       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 531       // Need to be careful about size being guaranteed to be less
 532       // than UnscaledOopHeapMax due to type constraints.
 533       char *lowest_start = aligned_heap_base_min_address;
 534       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 535       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 536         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 537       }
 538       lowest_start = align_up(lowest_start, attach_point_alignment);
 539       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 540                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 541     }
 542 
 543     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 544     // implement null checks.
 545     noaccess_prefix = noaccess_prefix_size(alignment);
 546 
 547     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
 548     char** addresses = get_attach_addresses_for_disjoint_mode();
 549     int i = 0;
 550     while (addresses[i] &&                                 // End of array not yet reached.
 551            ((_base == NULL) ||                             // No previous try succeeded.
 552             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 553              !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 554       char* const attach_point = addresses[i];
 555       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 556       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 557       i++;
 558     }
 559 
 560     // Last, desperate try without any placement.
 561     if (_base == NULL) {
 562       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 563       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 564     }
 565   }
 566 }
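
// Summary of the placement strategies tried above, from cheapest to most
// expensive decoding (assuming the default 3-bit shift, so UnscaledOopHeapMax
// is 4G and OopEncodingHeapMax is 32G; the actual mode is chosen later from
// the address that was obtained):
//
//   heap end <= 4G   : unscaled   ->  oop = (address)narrow
//   heap end <= 32G  : zerobased  ->  oop = (address)((uint64_t)narrow << 3)
//   base % 32G == 0  : disjoint   ->  base and shifted narrow bits do not overlap
//   anywhere else    : heapbased  ->  oop = base + ((uint64_t)narrow << 3),
//                                     needs the noaccess prefix for implicit
//                                     null checks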
 567 
 568 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 569 
 570   if (size == 0) {
 571     return;
 572   }
 573 
 574   if (heap_allocation_directory != NULL) {
 575     _fd = os::create_file_for_heap(heap_allocation_directory);
 576     if (_fd == -1) {
 577       vm_exit_during_initialization(
 578         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 579     }
 580   }
 581 
 582   // Heap size should be aligned to alignment, too.
 583   guarantee(is_aligned(size, alignment), "set by caller");
 584 
 585   if (UseCompressedOops) {
 586     initialize_compressed_heap(size, alignment, large);
 587     if (_size > size) {
 588       // We allocated heap with noaccess prefix.
 589       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 590       // if we had to try at arbitrary address.
 591       establish_noaccess_prefix();
 592     }
 593   } else {
 594     initialize(size, alignment, large, NULL, false);
 595   }
 596 
 597   assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
 598          "area must be distinguishable from marks for mark-sweep");
 599   assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
 600          "area must be distinguishable from marks for mark-sweep");
 601 
 602   if (base() != NULL) {
 603     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 604   }
 605 
 606   if (_fd != -1) {
 607     os::close(_fd);
 608   }
 609 }
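
// Usage sketch (hypothetical sizes, mirroring how heap setup code calls this
// constructor): reserve a 2G heap, 2M aligned, optionally backed by a file
// when -XX:AllocateHeapAt=<dir> is set.
//
//   ReservedHeapSpace heap_rs(2*G, 2*M, UseLargePages, AllocateHeapAt);
//   if (!heap_rs.is_reserved()) {
//     vm_exit_during_initialization("Could not reserve enough space for object heap");
//   }
//   MemRegion reserved = heap_rs.region();  // [base, end) in HeapWords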
 610 
 611 MemRegion ReservedHeapSpace::region() const {
 612   return MemRegion((HeapWord*)base(), (HeapWord*)end());
 613 }
 614 
 615 // Reserve space for code segment.  Same as Java heap only we mark this as
 616 // executable.
 617 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 618                                      size_t rs_align,
 619                                      bool large) : ReservedSpace() {
 620   initialize(r_size, rs_align, large, /*requested address*/ NULL, /*executable*/ true);
 621   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 622 }
 623 
 624 // VirtualSpace
 625 
 626 VirtualSpace::VirtualSpace() {
 627   _low_boundary           = NULL;
 628   _high_boundary          = NULL;
 629   _low                    = NULL;
 630   _high                   = NULL;
 631   _lower_high             = NULL;
 632   _middle_high            = NULL;
 633   _upper_high             = NULL;
 634   _lower_high_boundary    = NULL;
 635   _middle_high_boundary   = NULL;
 636   _upper_high_boundary    = NULL;
 637   _lower_alignment        = 0;
 638   _middle_alignment       = 0;
 639   _upper_alignment        = 0;
 640   _special                = false;
 641   _executable             = false;
 642 }
 643 
 644 
 645 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 646   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 647   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 648 }
 649 
 650 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 651   if(!rs.is_reserved()) return false;  // allocation failed.
 652   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 653   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 654 
 655   _low_boundary  = rs.base();
 656   _high_boundary = low_boundary() + rs.size();
 657 
 658   _low = low_boundary();
 659   _high = low();
 660 
 661   _special = rs.special();
 662   _executable = rs.executable();
 663 
 664   // When a VirtualSpace begins life at a large size, make all future expansion
 665   // and shrinking occur aligned to a granularity of large pages.  This avoids
 666   // fragmentation of physical addresses that inhibits the use of large pages
 667   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 668   // page size, the only spaces that get handled this way are codecache and
 669   // the heap itself, both of which provide a substantial performance
 670   // boost in many benchmarks when covered by large pages.
 671   //
 672   // No attempt is made to force large page alignment at the very top and
 673   // bottom of the space if they are not aligned so already.
 674   _lower_alignment  = os::vm_page_size();
 675   _middle_alignment = max_commit_granularity;
 676   _upper_alignment  = os::vm_page_size();
 677 
 678   // End of each region
 679   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 680   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 681   _upper_high_boundary = high_boundary();
 682 
 683   // High address of each region
 684   _lower_high = low_boundary();
 685   _middle_high = lower_high_boundary();
 686   _upper_high = middle_high_boundary();
 687 
 688   // commit to initial size
 689   if (committed_size > 0) {
 690     if (!expand_by(committed_size)) {
 691       return false;
 692     }
 693   }
 694   return true;
 695 }
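
// Worked example of the three commit regions (hypothetical numbers: 4K base
// pages, 2M max_commit_granularity, reservation covering [3M, 13M)):
//
//   _lower_high_boundary  = align_up(3M, 2M)    = 4M
//   _middle_high_boundary = align_down(13M, 2M) = 12M
//   _upper_high_boundary  = 13M
//
//   lower  region [3M,  4M)  is committed with 4K pages
//   middle region [4M, 12M)  is committed with 2M pages
//   upper  region [12M, 13M) is committed with 4K pages
//
// If the reservation is already 2M aligned at both ends, the lower and upper
// regions are empty and all commits go through the middle region.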
 696 
 697 
 698 VirtualSpace::~VirtualSpace() {
 699   release();
 700 }
 701 
 702 
 703 void VirtualSpace::release() {
 704   // This does not release memory it reserved.
 705   // Caller must release via rs.release();
 706   _low_boundary           = NULL;
 707   _high_boundary          = NULL;
 708   _low                    = NULL;
 709   _high                   = NULL;
 710   _lower_high             = NULL;
 711   _middle_high            = NULL;
 712   _upper_high             = NULL;
 713   _lower_high_boundary    = NULL;
 714   _middle_high_boundary   = NULL;
 715   _upper_high_boundary    = NULL;
 716   _lower_alignment        = 0;
 717   _middle_alignment       = 0;
 718   _upper_alignment        = 0;
 719   _special                = false;
 720   _executable             = false;
 721 }
 722 
 723 
 724 size_t VirtualSpace::committed_size() const {
 725   return pointer_delta(high(), low(), sizeof(char));
 726 }
 727 
 728 
 729 size_t VirtualSpace::reserved_size() const {
 730   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 731 }
 732 
 733 
 734 size_t VirtualSpace::uncommitted_size()  const {
 735   return reserved_size() - committed_size();
 736 }
 737 
 738 size_t VirtualSpace::actual_committed_size() const {
 739   // Special VirtualSpaces commit all reserved space up front.
 740   if (special()) {
 741     return reserved_size();
 742   }
 743 
 744   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 745   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 746   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 747 
 748 #ifdef ASSERT
 749   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 750   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 751   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 752 
 753   if (committed_high > 0) {
 754     assert(committed_low == lower, "Must be");
 755     assert(committed_middle == middle, "Must be");
 756   }
 757 
 758   if (committed_middle > 0) {
 759     assert(committed_low == lower, "Must be");
 760   }
 761   if (committed_middle < middle) {
 762     assert(committed_high == 0, "Must be");
 763   }
 764 
 765   if (committed_low < lower) {
 766     assert(committed_high == 0, "Must be");
 767     assert(committed_middle == 0, "Must be");
 768   }
 769 #endif
 770 
 771   return committed_low + committed_middle + committed_high;
 772 }
 773 
 774 
 775 bool VirtualSpace::contains(const void* p) const {
 776   return low() <= (const char*) p && (const char*) p < high();
 777 }
 778 
 779 static void pretouch_expanded_memory(void* start, void* end) {
 780   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 781   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 782 
 783   os::pretouch_memory(start, end);
 784 }
 785 
 786 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 787   if (os::commit_memory(start, size, alignment, executable)) {
 788     if (pre_touch || AlwaysPreTouch) {
 789       pretouch_expanded_memory(start, start + size);
 790     }
 791     return true;
 792   }
 793 
 794   debug_only(warning(
 795       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 796       " size=" SIZE_FORMAT ", executable=%d) failed",
 797       p2i(start), p2i(start + size), size, executable);)
 798 
 799   return false;
 800 }
 801 
 802 /*
 803    First we need to determine if a particular virtual space is using large
 804    pages.  This is done at the initialize function and only virtual spaces
 805    that are larger than LargePageSizeInBytes use large pages.  Once we
 806    have determined this, all expand_by and shrink_by calls must grow and
 807    shrink by large page size chunks.  If a particular request
 808    is within the current large page, the call to commit and uncommit memory
 809    can be ignored.  In the case that the low and high boundaries of this
 810    space is not large page aligned, the pages leading to the first large
 811    page address and the pages after the last large page address must be
 812    allocated with default pages.
 813 */
 814 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 815   if (uncommitted_size() < bytes) {
 816     return false;
 817   }
 818 
 819   if (special()) {
 820     // don't commit memory if the entire space is pinned in memory
 821     _high += bytes;
 822     return true;
 823   }
 824 
 825   char* previous_high = high();
 826   char* unaligned_new_high = high() + bytes;
 827   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 828 
 829   // Calculate where the new high for each of the regions should be.  If
 830   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 831   // then the unaligned lower and upper new highs would be the
 832   // lower_high() and upper_high() respectively.
 833   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 834   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 835   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 836 
 837   // Align the new highs based on the regions alignment.  lower and upper
 838   // alignment will always be default page size.  middle alignment will be
 839   // LargePageSizeInBytes if the actual size of the virtual space is in
 840   // fact larger than LargePageSizeInBytes.
 841   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 842   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 843   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 844 
 845   // Determine which regions need to grow in this expand_by call.
 846   // If you are growing in the lower region, high() must be in that
 847   // region so calculate the size based on high().  For the middle and
 848   // upper regions, determine the starting point of growth based on the
 849   // location of high().  By getting the MAX of the region's low address
 850   // (or the previous region's high address) and high(), we can tell if it
 851   // is an intra or inter region growth.
 852   size_t lower_needs = 0;
 853   if (aligned_lower_new_high > lower_high()) {
 854     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 855   }
 856   size_t middle_needs = 0;
 857   if (aligned_middle_new_high > middle_high()) {
 858     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 859   }
 860   size_t upper_needs = 0;
 861   if (aligned_upper_new_high > upper_high()) {
 862     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 863   }
 864 
 865   // Check contiguity.
 866   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 867          "high address must be contained within the region");
 868   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 869          "high address must be contained within the region");
 870   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 871          "high address must be contained within the region");
 872 
 873   // Commit regions
 874   if (lower_needs > 0) {
 875     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 876     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 877       return false;
 878     }
 879     _lower_high += lower_needs;
 880   }
 881 
 882   if (middle_needs > 0) {
 883     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 884     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 885       return false;
 886     }
 887     _middle_high += middle_needs;
 888   }
 889 
 890   if (upper_needs > 0) {
 891     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 892     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 893       return false;
 894     }
 895     _upper_high += upper_needs;
 896   }
 897 
 898   _high += bytes;
 899   return true;
 900 }
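
// Worked example, continuing the hypothetical [3M, 13M) layout sketched after
// initialize_with_granularity() (lower [3M,4M), middle [4M,12M), upper
// [12M,13M), nothing committed yet):
//
//   expand_by(6*M)  =>  unaligned_new_high = 3M + 6M = 9M
//     lower_needs  = 1M   commit [3M, 4M)  with 4K pages
//     middle_needs = 6M   commit [4M, 10M) with 2M pages (9M rounded up to 10M)
//     upper_needs  = 0
//
//   _high becomes 9M, while actual_committed_size() is 7M because of the
//   large-page round-up in the middle region.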
 901 
 902 // A page is uncommitted if the contents of the entire page is deemed unusable.
 903 // Continue to decrement the high() pointer until it reaches a page boundary
 904 // in which case that particular page can now be uncommitted.
 905 void VirtualSpace::shrink_by(size_t size) {
 906   if (committed_size() < size)
 907     fatal("Cannot shrink virtual space to negative size");
 908 
 909   if (special()) {
 910     // don't uncommit if the entire space is pinned in memory
 911     _high -= size;
 912     return;
 913   }
 914 
 915   char* unaligned_new_high = high() - size;
 916   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 917 
 918   // Calculate new unaligned address
 919   char* unaligned_upper_new_high =
 920     MAX2(unaligned_new_high, middle_high_boundary());
 921   char* unaligned_middle_new_high =
 922     MAX2(unaligned_new_high, lower_high_boundary());
 923   char* unaligned_lower_new_high =
 924     MAX2(unaligned_new_high, low_boundary());
 925 
 926   // Align address to region's alignment
 927   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 928   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 929   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 930 
 931   // Determine which regions need to shrink
 932   size_t upper_needs = 0;
 933   if (aligned_upper_new_high < upper_high()) {
 934     upper_needs =
 935       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 936   }
 937   size_t middle_needs = 0;
 938   if (aligned_middle_new_high < middle_high()) {
 939     middle_needs =
 940       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 941   }
 942   size_t lower_needs = 0;
 943   if (aligned_lower_new_high < lower_high()) {
 944     lower_needs =
 945       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 946   }
 947 
 948   // Check contiguity.
 949   assert(middle_high_boundary() <= upper_high() &&
 950          upper_high() <= upper_high_boundary(),
 951          "high address must be contained within the region");
 952   assert(lower_high_boundary() <= middle_high() &&
 953          middle_high() <= middle_high_boundary(),
 954          "high address must be contained within the region");
 955   assert(low_boundary() <= lower_high() &&
 956          lower_high() <= lower_high_boundary(),
 957          "high address must be contained within the region");
 958 
 959   // Uncommit
 960   if (upper_needs > 0) {
 961     assert(middle_high_boundary() <= aligned_upper_new_high &&
 962            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 963            "must not shrink beyond region");
 964     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 965       debug_only(warning("os::uncommit_memory failed"));
 966       return;
 967     } else {
 968       _upper_high -= upper_needs;
 969     }
 970   }
 971   if (middle_needs > 0) {
 972     assert(lower_high_boundary() <= aligned_middle_new_high &&
 973            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 974            "must not shrink beyond region");
 975     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 976       debug_only(warning("os::uncommit_memory failed"));
 977       return;
 978     } else {
 979       _middle_high -= middle_needs;
 980     }
 981   }
 982   if (lower_needs > 0) {
 983     assert(low_boundary() <= aligned_lower_new_high &&
 984            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 985            "must not shrink beyond region");
 986     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 987       debug_only(warning("os::uncommit_memory failed"));
 988       return;
 989     } else {
 990       _lower_high -= lower_needs;
 991     }
 992   }
 993 
 994   _high -= size;
 995 }
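
// Continuing the same hypothetical example: after expand_by(6*M) above,
// shrink_by(3*M) moves high() from 9M back to 6M. Only the middle region
// shrinks: [6M, 10M) is uncommitted (6M is already 2M aligned), while the
// lower region [3M, 4M) stays fully committed.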
 996 
 997 #ifndef PRODUCT
 998 void VirtualSpace::check_for_contiguity() {
 999   // Check contiguity.
1000   assert(low_boundary() <= lower_high() &&
1001          lower_high() <= lower_high_boundary(),
1002          "high address must be contained within the region");
1003   assert(lower_high_boundary() <= middle_high() &&
1004          middle_high() <= middle_high_boundary(),
1005          "high address must be contained within the region");
1006   assert(middle_high_boundary() <= upper_high() &&
1007          upper_high() <= upper_high_boundary(),
1008          "high address must be contained within the region");
1009   assert(low() >= low_boundary(), "low");
1010   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1011   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1012   assert(high() <= upper_high(), "upper high");
1013 }
1014 
1015 void VirtualSpace::print_on(outputStream* out) {
1016   out->print   ("Virtual space:");
1017   if (special()) out->print(" (pinned in memory)");
1018   out->cr();
1019   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1020   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1021   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1022   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1023 }
1024 
1025 void VirtualSpace::print() {
1026   print_on(tty);
1027 }
1028 
1029 /////////////// Unit tests ///////////////
1030 
1031 #ifndef PRODUCT
1032 
1033 class TestReservedSpace : AllStatic {
1034  public:
1035   static void small_page_write(void* addr, size_t size) {
1036     size_t page_size = os::vm_page_size();
1037 
1038     char* end = (char*)addr + size;
1039     for (char* p = (char*)addr; p < end; p += page_size) {
1040       *p = 1;
1041     }
1042   }
1043 
1044   static void release_memory_for_test(ReservedSpace rs) {
1045     if (rs.special()) {
1046       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1047     } else {
1048       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1049     }
1050   }
1051 
1052   static void test_reserved_space1(size_t size, size_t alignment) {
1053     assert(is_aligned(size, alignment), "Incorrect input parameters");
1054 
1055     ReservedSpace rs(size,          // size
1056                      alignment,     // alignment
1057                      UseLargePages, // large
1058                      (char *)NULL); // requested_address
1059 
1060     assert(rs.base() != NULL, "Must be");
1061     assert(rs.size() == size, "Must be");
1062 
1063     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1064     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1065 
1066     if (rs.special()) {
1067       small_page_write(rs.base(), size);
1068     }
1069 
1070     release_memory_for_test(rs);
1071   }
1072 
1073   static void test_reserved_space2(size_t size) {
1074     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1075 
1076     ReservedSpace rs(size);
1077 
1078     assert(rs.base() != NULL, "Must be");
1079     assert(rs.size() == size, "Must be");
1080 
1081     if (rs.special()) {
1082       small_page_write(rs.base(), size);
1083     }
1084 
1085     release_memory_for_test(rs);
1086   }
1087 
1088   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1089     if (size < alignment) {
1090       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1091       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1092       return;
1093     }
1094 
1095     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1096     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1097 
1098     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1099 
1100     ReservedSpace rs(size, alignment, large);
1101 
1102     assert(rs.base() != NULL, "Must be");
1103     assert(rs.size() == size, "Must be");
1104 
1105     if (rs.special()) {
1106       small_page_write(rs.base(), size);
1107     }
1108 
1109     release_memory_for_test(rs);
1110   }
1111 
1112 
1113   static void test_reserved_space1() {
1114     size_t size = 2 * 1024 * 1024;
1115     size_t ag   = os::vm_allocation_granularity();
1116 
1117     test_reserved_space1(size,      ag);
1118     test_reserved_space1(size * 2,  ag);
1119     test_reserved_space1(size * 10, ag);
1120   }
1121 
1122   static void test_reserved_space2() {
1123     size_t size = 2 * 1024 * 1024;
1124     size_t ag = os::vm_allocation_granularity();
1125 
1126     test_reserved_space2(size * 1);
1127     test_reserved_space2(size * 2);
1128     test_reserved_space2(size * 10);
1129     test_reserved_space2(ag);
1130     test_reserved_space2(size - ag);
1131     test_reserved_space2(size);
1132     test_reserved_space2(size + ag);
1133     test_reserved_space2(size * 2);
1134     test_reserved_space2(size * 2 - ag);
1135     test_reserved_space2(size * 2 + ag);
1136     test_reserved_space2(size * 3);
1137     test_reserved_space2(size * 3 - ag);
1138     test_reserved_space2(size * 3 + ag);
1139     test_reserved_space2(size * 10);
1140     test_reserved_space2(size * 10 + size / 2);
1141   }
1142 
1143   static void test_reserved_space3() {
1144     size_t ag = os::vm_allocation_granularity();
1145 
1146     test_reserved_space3(ag,      ag    , false);
1147     test_reserved_space3(ag * 2,  ag    , false);
1148     test_reserved_space3(ag * 3,  ag    , false);
1149     test_reserved_space3(ag * 2,  ag * 2, false);
1150     test_reserved_space3(ag * 4,  ag * 2, false);
1151     test_reserved_space3(ag * 8,  ag * 2, false);
1152     test_reserved_space3(ag * 4,  ag * 4, false);
1153     test_reserved_space3(ag * 8,  ag * 4, false);
1154     test_reserved_space3(ag * 16, ag * 4, false);
1155 
1156     if (UseLargePages) {
1157       size_t lp = os::large_page_size();
1158 
1159       // Without large pages
1160       test_reserved_space3(lp,     ag * 4, false);
1161       test_reserved_space3(lp * 2, ag * 4, false);
1162       test_reserved_space3(lp * 4, ag * 4, false);
1163       test_reserved_space3(lp,     lp    , false);
1164       test_reserved_space3(lp * 2, lp    , false);
1165       test_reserved_space3(lp * 3, lp    , false);
1166       test_reserved_space3(lp * 2, lp * 2, false);
1167       test_reserved_space3(lp * 4, lp * 2, false);
1168       test_reserved_space3(lp * 8, lp * 2, false);
1169 
1170       // With large pages
1171       test_reserved_space3(lp, ag * 4    , true);
1172       test_reserved_space3(lp * 2, ag * 4, true);
1173       test_reserved_space3(lp * 4, ag * 4, true);
1174       test_reserved_space3(lp, lp        , true);
1175       test_reserved_space3(lp * 2, lp    , true);
1176       test_reserved_space3(lp * 3, lp    , true);
1177       test_reserved_space3(lp * 2, lp * 2, true);
1178       test_reserved_space3(lp * 4, lp * 2, true);
1179       test_reserved_space3(lp * 8, lp * 2, true);
1180     }
1181   }
1182 
1183   static void test_reserved_space() {
1184     test_reserved_space1();
1185     test_reserved_space2();
1186     test_reserved_space3();
1187   }
1188 };
1189 
1190 void TestReservedSpace_test() {
1191   TestReservedSpace::test_reserved_space();
1192 }
1193 
1194 #define assert_equals(actual, expected)  \
1195   assert(actual == expected,             \
1196          "Got " SIZE_FORMAT " expected " \
1197          SIZE_FORMAT, actual, expected);
1198 
1199 #define assert_ge(value1, value2)                  \
1200   assert(value1 >= value2,                         \
1201          "'" #value1 "': " SIZE_FORMAT " '"        \
1202          #value2 "': " SIZE_FORMAT, value1, value2);
1203 
1204 #define assert_lt(value1, value2)                  \
1205   assert(value1 < value2,                          \
1206          "'" #value1 "': " SIZE_FORMAT " '"        \
1207          #value2 "': " SIZE_FORMAT, value1, value2);
1208 
1209 
1210 class TestVirtualSpace : AllStatic {
1211   enum TestLargePages {
1212     Default,
1213     Disable,
1214     Reserve,
1215     Commit
1216   };
1217 
1218   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1219     switch(mode) {
1220     default:
1221     case Default:
1222     case Reserve:
1223       return ReservedSpace(reserve_size_aligned);
1224     case Disable:
1225     case Commit:
1226       return ReservedSpace(reserve_size_aligned,
1227                            os::vm_allocation_granularity(),
1228                            /* large */ false);
1229     }
1230   }
1231 
1232   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1233     switch(mode) {
1234     default:
1235     case Default:
1236     case Reserve:
1237       return vs.initialize(rs, 0);
1238     case Disable:
1239       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1240     case Commit:
1241       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1242     }
1243   }
1244 
1245  public:
1246   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1247                                                         TestLargePages mode = Default) {
1248     size_t granularity = os::vm_allocation_granularity();
1249     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1250 
1251     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1252 
1253     assert(reserved.is_reserved(), "Must be");
1254 
1255     VirtualSpace vs;
1256     bool initialized = initialize_virtual_space(vs, reserved, mode);
1257     assert(initialized, "Failed to initialize VirtualSpace");
1258 
1259     vs.expand_by(commit_size, false);
1260 
1261     if (vs.special()) {
1262       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1263     } else {
1264       assert_ge(vs.actual_committed_size(), commit_size);
1265       // Approximate the commit granularity.
1266       // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1268       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1269                                    os::vm_page_size() : os::large_page_size();
1270       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1271     }
1272 
1273     reserved.release();
1274   }
1275 
1276   static void test_virtual_space_actual_committed_space_one_large_page() {
1277     if (!UseLargePages) {
1278       return;
1279     }
1280 
1281     size_t large_page_size = os::large_page_size();
1282 
1283     ReservedSpace reserved(large_page_size, large_page_size, true);
1284 
1285     assert(reserved.is_reserved(), "Must be");
1286 
1287     VirtualSpace vs;
1288     bool initialized = vs.initialize(reserved, 0);
1289     assert(initialized, "Failed to initialize VirtualSpace");
1290 
1291     vs.expand_by(large_page_size, false);
1292 
1293     assert_equals(vs.actual_committed_size(), large_page_size);
1294 
1295     reserved.release();
1296   }
1297 
1298   static void test_virtual_space_actual_committed_space() {
1299     test_virtual_space_actual_committed_space(4 * K, 0);
1300     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1301     test_virtual_space_actual_committed_space(8 * K, 0);
1302     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1303     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1304     test_virtual_space_actual_committed_space(12 * K, 0);
1305     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1306     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1307     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1308     test_virtual_space_actual_committed_space(64 * K, 0);
1309     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1310     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1311     test_virtual_space_actual_committed_space(2 * M, 0);
1312     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1313     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1314     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1315     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1316     test_virtual_space_actual_committed_space(10 * M, 0);
1317     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1318     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1319     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1320     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1321     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1322     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1323   }
1324 
1325   static void test_virtual_space_disable_large_pages() {
1326     if (!UseLargePages) {
1327       return;
1328     }
    // These test cases verify that committing is done with small pages when
    // VirtualSpace is forced to disable large pages.
1330     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1331     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1332     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1333     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1334     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1335     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1336     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1337 
1338     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1339     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1340     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1341     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1342     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1343     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1344     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1345 
1346     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1347     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1348     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1349     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1350     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1351     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1352     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1353   }
1354 
1355   static void test_virtual_space() {
1356     test_virtual_space_actual_committed_space();
1357     test_virtual_space_actual_committed_space_one_large_page();
1358     test_virtual_space_disable_large_pages();
1359   }
1360 };
1361 
1362 void TestVirtualSpace_test() {
1363   TestVirtualSpace::test_virtual_space();
1364 }
1365 
1366 #endif // PRODUCT
1367 
1368 #endif