1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  39     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  40 }
  41 
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
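     // Illustrative example of the sizing above (hypothetical values): with a
     // preferred_page_size of 2M on a platform whose allocation granularity is 64K,
     // alignment becomes MAX2(2M, 64K) = 2M, and a requested size of 5M is rounded
     // up to align_up(5M, 2M) = 6M before initialize() is called.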
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1) {
  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1) {
  70   initialize(size, alignment, large, NULL, executable);
  71 }
  72 
  73 // Helper method
  74 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  75   if (is_file_mapped) {
  76     if (!os::unmap_memory(base, size)) {
  77       fatal("os::unmap_memory failed");
  78     }
  79   } else if (!os::release_memory(base, size)) {
  80     fatal("os::release_memory failed");
  81   }
  82 }
  83 
  84 // Helper method.
  85 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  86                                            const size_t size, bool special, bool is_file_mapped = false)
  87 {
  88   if (base == requested_address || requested_address == NULL)
  89     return false; // did not fail
  90 
  91   if (base != NULL) {
  92     // A different reserve address may be acceptable in other cases,
  93     // but for compressed oops the heap should be at the requested address.
  94     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  95     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  96     // OS ignored requested address. Try different address.
  97     if (special) {
  98       if (!os::release_memory_special(base, size)) {
  99         fatal("os::release_memory_special failed");
 100       }
 101     } else {
 102       unmap_or_release_memory(base, size, is_file_mapped);
 103     }
 104   }
 105   return true;
 106 }
 107 
 108 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 109                                char* requested_address,
 110                                bool executable) {
 111   const size_t granularity = os::vm_allocation_granularity();
 112   assert((size & (granularity - 1)) == 0,
 113          "size not aligned to os::vm_allocation_granularity()");
 114   assert((alignment & (granularity - 1)) == 0,
 115          "alignment not aligned to os::vm_allocation_granularity()");
 116   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 117          "not a power of 2");
 118 
 119   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 120 
 121   _base = NULL;
 122   _size = 0;
 123   _special = false;
 124   _executable = executable;
 125   _alignment = 0;
 126   _noaccess_prefix = 0;
 127   if (size == 0) {
 128     return;
 129   }
 130 
 131   // If the OS doesn't support demand paging for large page memory, we need
 132   // to use reserve_memory_special() to reserve and pin the entire region.
 133   // If there is a backing file directory for this space, then whether
 134   // large pages are allocated is up to the file system of the backing file.
 135   // So we ignore the UseLargePages flag in this case.
 136   bool special = large && !os::can_commit_large_page_memory();
 137   if (special && _fd_for_heap != -1) {
 138     special = false;
 139     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 140                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 141       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 142     }
 143   }
 144 
 145   char* base = NULL;
 146 
 147   if (special) {
 148 
 149     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 150 
 151     if (base != NULL) {
 152       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 153         // OS ignored requested address. Try different address.
 154         return;
 155       }
 156       // Check alignment constraints.
 157       assert((uintptr_t) base % alignment == 0,
 158              "Large pages returned a non-aligned address, base: "
 159              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 160              p2i(base), alignment);
 161       _special = true;
 162     } else {
 163       // failed; try to reserve regular memory below
 164       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 165                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 166         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 167       }
 168     }
 169   }
 170 
 171   if (base == NULL) {
 172     // Optimistically assume that the OS returns an aligned base pointer.
 173     // When reserving a large address range, most OSes seem to align to at
 174     // least 64K.
 175 
 176     // If the memory was requested at a particular address, use
 177     // os::attempt_reserve_memory_at() to avoid mapping over something
 178     // important.  If available space is not detected, return NULL.
 179 
 180     if (requested_address != 0) {
 181       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 182       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 183         // OS ignored requested address. Try different address.
 184         base = NULL;
 185       }
 186     } else {
 187       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 188     }
 189 
 190     if (base == NULL) return;
 191 
 192     // Check alignment constraints
 193     if ((((size_t)base) & (alignment - 1)) != 0) {
 194       // Base not aligned, retry
 195       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 196 
 197       // Make sure that size is aligned
 198       size = align_up(size, alignment);
 199       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 200 
 201       if (requested_address != 0 &&
 202           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 203         // As a result of the alignment constraints, the allocated base differs
 204       // from the requested address. Return to the caller, who can
 205       // take remedial action (for example, try again without a requested address).
 206         assert(_base == NULL, "should be");
 207         return;
 208       }
 209     }
 210   }
 211   // Done
 212   _base = base;
 213   _size = size;
 214   _alignment = alignment;
 215   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 216   if (_fd_for_heap != -1) {
 217     _special = true;
 218   }
 219 }
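     // Illustrative use of the constructors above (a sketch with hypothetical sizes,
     // not code used elsewhere in this file):
     //   ReservedSpace rs(64 * M, os::vm_allocation_granularity(),
     //                    /*large=*/false, /*requested_address=*/NULL);
     //   if (rs.is_reserved()) {
     //     // rs.base() is aligned to rs.alignment(); rs.special() is true only when
     //     // the space is backed by pinned large pages or a heap backing file.
     //   }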
 220 
 221 
 222 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 223                              bool special, bool executable) {
 224   assert((size % os::vm_allocation_granularity()) == 0,
 225          "size not allocation aligned");
 226   _base = base;
 227   _size = size;
 228   _alignment = alignment;
 229   _noaccess_prefix = 0;
 230   _special = special;
 231   _executable = executable;
 232 }
 233 
 234 
 235 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 236                                         bool split, bool realloc) {
 237   assert(partition_size <= size(), "partition failed");
 238   if (split) {
 239     os::split_reserved_memory(base(), size(), partition_size, realloc);
 240   }
 241   ReservedSpace result(base(), partition_size, alignment, special(),
 242                        executable());
 243   return result;
 244 }
 245 
 246 
 247 ReservedSpace
 248 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 249   assert(partition_size <= size(), "partition failed");
 250   ReservedSpace result(base() + partition_size, size() - partition_size,
 251                        alignment, special(), executable());
 252   return result;
 253 }
 254 
 255 
 256 size_t ReservedSpace::page_align_size_up(size_t size) {
 257   return align_up(size, os::vm_page_size());
 258 }
 259 
 260 
 261 size_t ReservedSpace::page_align_size_down(size_t size) {
 262   return align_down(size, os::vm_page_size());
 263 }
 264 
 265 
 266 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 267   return align_up(size, os::vm_allocation_granularity());
 268 }
 269 
 270 
 271 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 272   return align_down(size, os::vm_allocation_granularity());
 273 }
 274 
 275 
 276 void ReservedSpace::release() {
 277   if (is_reserved()) {
 278     char *real_base = _base - _noaccess_prefix;
 279     const size_t real_size = _size + _noaccess_prefix;
 280     if (special()) {
 281       if (_fd_for_heap != -1) {
 282         os::unmap_memory(real_base, real_size);
 283       } else {
 284         os::release_memory_special(real_base, real_size);
 285       }
 286     } else {
 287       os::release_memory(real_base, real_size);
 288     }
 289     _base = NULL;
 290     _size = 0;
 291     _noaccess_prefix = 0;
 292     _alignment = 0;
 293     _special = false;
 294     _executable = false;
 295   }
 296 }
 297 
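     // noaccess_prefix_size() below sizes the protected region placed in front of a
     // compressed-oops heap.  Illustrative example (hypothetical values): with a 4K
     // vm_page_size and an 8M heap alignment, lcm(4K, 8M) = 8M, so the prefix
     // occupies exactly one alignment unit.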
 298 static size_t noaccess_prefix_size(size_t alignment) {
 299   return lcm(os::vm_page_size(), alignment);
 300 }
 301 
 302 void ReservedHeapSpace::establish_noaccess_prefix() {
 303   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 310       // Protect memory at the base of the allocated region.
 311       // If special, the page was committed (only matters on Windows).
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       Universe::set_narrow_oop_use_implicit_null_checks(false);
 322     }
 323   }
 324 
 325   _base += _noaccess_prefix;
 326   _size -= _noaccess_prefix;
 327   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 328 }
 329 
 330 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 331 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 332 // might still fulfill the wishes of the caller.
 333 // Ensures the memory is aligned to 'alignment'.
 334 // NOTE: If ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 335 void ReservedHeapSpace::try_reserve_heap(size_t size,
 336                                          size_t alignment,
 337                                          bool large,
 338                                          char* requested_address) {
 339   if (_base != NULL) {
 340     // We tried before, but we didn't like the address delivered.
 341     release();
 342   }
 343 
 344   // If the OS doesn't support demand paging for large page memory, we need
 345   // to use reserve_memory_special() to reserve and pin the entire region.
 346   // If there is a backing file directory for this space, then whether
 347   // large pages are allocated is up to the file system of the backing file.
 348   // So we ignore the UseLargePages flag in this case.
 349   bool special = large && !os::can_commit_large_page_memory();
 350   if (special && _fd_for_heap != -1) {
 351     special = false;
 352     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 353                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 354       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 355     }
 356   }
 357   char* base = NULL;
 358 
 359   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 360                              " heap of size " SIZE_FORMAT_HEX,
 361                              p2i(requested_address),
 362                              size);
 363 
 364   if (special) {
 365     base = os::reserve_memory_special(size, alignment, requested_address, false);
 366 
 367     if (base != NULL) {
 368       // Check alignment constraints.
 369       assert((uintptr_t) base % alignment == 0,
 370              "Large pages returned a non-aligned address, base: "
 371              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 372              p2i(base), alignment);
 373       _special = true;
 374     }
 375   }
 376 
 377   if (base == NULL) {
 378     // Failed; try to reserve regular memory below
 379     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 380                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 381       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 382     }
 383 
 384     // Optimistically assume that the OS returns an aligned base pointer.
 385     // When reserving a large address range, most OSes seem to align to at
 386     // least 64K.
 387 
 388     // If the memory was requested at a particular address, use
 389     // os::attempt_reserve_memory_at() to avoid mapping over something
 390     // important.  If available space is not detected, return NULL.
 391 
 392     if (requested_address != 0) {
 393       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 394     } else {
 395       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 396     }
 397   }
 398   if (base == NULL) { return; }
 399 
 400   // Done
 401   _base = base;
 402   _size = size;
 403   _alignment = alignment;
 404 
 405   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 406   if (_fd_for_heap != -1) {
 407     _special = true;
 408   }
 409 
 410   // Check alignment constraints
 411   if ((((size_t)base) & (alignment - 1)) != 0) {
 412     // Base not aligned, retry.
 413     release();
 414   }
 415 }
 416 
 417 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 418                                           char *lowest_start,
 419                                           size_t attach_point_alignment,
 420                                           char *aligned_heap_base_min_address,
 421                                           char *upper_bound,
 422                                           size_t size,
 423                                           size_t alignment,
 424                                           bool large) {
 425   const size_t attach_range = highest_start - lowest_start;
 426   // Cap num_attempts at the number of attempts that is actually possible.
 427   // At least one attempt is possible even for a zero-sized attach range.
 428   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 429   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 430 
 431   const size_t stepsize = (attach_range == 0) ? // Only one try.
 432     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 433 
 434   // Try attach points from top to bottom.
 435   char* attach_point = highest_start;
 436   while (attach_point >= lowest_start  &&
 437          attach_point <= highest_start &&  // Avoid wrap around.
 438          ((_base == NULL) ||
 439           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 440     try_reserve_heap(size, alignment, large, attach_point);
 441     attach_point -= stepsize;
 442   }
 443 }
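     // Illustrative example of the stepping above (hypothetical values): with
     // lowest_start = 2G, highest_start = 6G, attach_point_alignment = 64K and
     // HeapSearchSteps = 3, the attach range is 4G and the stepsize is
     // align_up(4G / 3, 64K); attach points are then tried from 6G downwards until
     // a satisfactory heap is reserved or the range is exhausted.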
 444 
 445 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 446 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 447 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 448 
 449 // Helper for heap allocation. Returns an array with addresses
 450 // (OS-specific) which are suited for disjoint base mode. Array is
 451 // NULL terminated.
 452 static char** get_attach_addresses_for_disjoint_mode() {
 453   static uint64_t addresses[] = {
 454      2 * SIZE_32G,
 455      3 * SIZE_32G,
 456      4 * SIZE_32G,
 457      8 * SIZE_32G,
 458     10 * SIZE_32G,
 459      1 * SIZE_64K * SIZE_32G,
 460      2 * SIZE_64K * SIZE_32G,
 461      3 * SIZE_64K * SIZE_32G,
 462      4 * SIZE_64K * SIZE_32G,
 463     16 * SIZE_64K * SIZE_32G,
 464     32 * SIZE_64K * SIZE_32G,
 465     34 * SIZE_64K * SIZE_32G,
 466     0
 467   };
 468 
 469   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 470   // assumes the array is sorted in ascending order.
 471   uint i = 0;
 472   while (addresses[i] != 0 &&
 473          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 474     i++;
 475   }
 476   uint start = i;
 477 
 478   // Avoid more steps than requested.
 479   i = 0;
 480   while (addresses[start+i] != 0) {
 481     if (i == HeapSearchSteps) {
 482       addresses[start+i] = 0;
 483       break;
 484     }
 485     i++;
 486   }
 487 
 488   return (char**) &addresses[start];
 489 }
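     // With the table above, the first candidate attach point is 64G (2 * SIZE_32G)
     // and the largest is 34 * SIZE_64K * SIZE_32G; candidates below
     // OopEncodingHeapMax or HeapBaseMinAddress are skipped by the first loop, and
     // the list is truncated after HeapSearchSteps entries by the second loop.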
 490 
 491 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 492   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 493             "cannot allocate compressed oop heap for this size");
 494   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 495   assert(HeapBaseMinAddress > 0, "sanity");
 496 
 497   const size_t granularity = os::vm_allocation_granularity();
 498   assert((size & (granularity - 1)) == 0,
 499          "size not aligned to os::vm_allocation_granularity()");
 500   assert((alignment & (granularity - 1)) == 0,
 501          "alignment not aligned to os::vm_allocation_granularity()");
 502   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 503          "not a power of 2");
 504 
 505   // The necessary attach point alignment for generated wish addresses.
 506   // This is needed to increase the chance of attaching for mmap and shmat.
 507   const size_t os_attach_point_alignment =
 508     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 509     NOT_AIX(os::vm_allocation_granularity());
 510   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 511 
 512   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 513   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 514     noaccess_prefix_size(alignment) : 0;
 515 
 516   // Attempt to alloc at user-given address.
 517   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 518     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 519     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 520       release();
 521     }
 522   }
 523 
 524   // Keep heap at HeapBaseMinAddress.
 525   if (_base == NULL) {
 526 
 527     // Try to allocate the heap at addresses that allow efficient oop compression.
 528     // Different schemes are tried, in order of decreasing optimization potential.
 529     //
 530     // For this, try_reserve_heap() is called with the desired heap base addresses.
 531     // A call into the os layer to allocate at a given address can return memory
 532     // at a different address than requested.  Still, this might be memory at a useful
 533     // address. try_reserve_heap() always returns this allocated memory, as only here
 534     // the criteria for a good heap are checked.
 535 
 536     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 537     // Give it several tries from top of range to bottom.
 538     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 539 
 540       // Calculate the address range within which we try to attach (range of possible start addresses).
 541       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 542       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 543       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 544                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 545     }
 546 
 547     // zerobased: Attempt to allocate in the lower 32G.
 548     // But leave room for the compressed class pointers, which are allocated above
 549     // the heap.
 550     char *zerobased_max = (char *)OopEncodingHeapMax;
 551     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 552     // For small heaps, save some space for compressed class pointer
 553     // space so it can be decoded with no base.
 554     if (UseCompressedClassPointers && !UseSharedSpaces &&
 555         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 556         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 557       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 558     }
 559 
 560     // Give it several tries from top of range to bottom.
 561     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 562         ((_base == NULL) ||                        // No previous try succeeded.
 563          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 564 
 565       // Calculate the address range within which we try to attach (range of possible start addresses).
 566       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 567       // Need to be careful about size being guaranteed to be less
 568       // than UnscaledOopHeapMax due to type constraints.
 569       char *lowest_start = aligned_heap_base_min_address;
 570       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 571       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 572         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 573       }
 574       lowest_start = align_up(lowest_start, attach_point_alignment);
 575       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 576                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 577     }
 578 
 579     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 580     // implement null checks.
 581     noaccess_prefix = noaccess_prefix_size(alignment);
 582 
 583     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 584     char** addresses = get_attach_addresses_for_disjoint_mode();
 585     int i = 0;
 586     while (addresses[i] &&                                 // End of array not yet reached.
 587            ((_base == NULL) ||                             // No previous try succeeded.
 588             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 589              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 590       char* const attach_point = addresses[i];
 591       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 592       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 593       i++;
 594     }
 595 
 596     // Last, desperate try without any placement.
 597     if (_base == NULL) {
 598       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 599       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 600     }
 601   }
 602 }
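     // To summarize, initialize_compressed_heap() tries, in order of decreasing
     // benefit for compressed oop decoding:
     //   1. the user-supplied HeapBaseMinAddress (only if set explicitly),
     //   2. addresses below UnscaledOopHeapMax (no base, no shift),
     //   3. addresses below zerobased_max (zero base, with shift),
     //   4. disjoint-base attach points aligned to OopEncodingHeapMax, and
     //   5. an unconstrained allocation that then needs a noaccess prefix.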
 603 
 604 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 605 
 606   if (size == 0) {
 607     return;
 608   }
 609 
 610   if (heap_allocation_directory != NULL) {
 611     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 612     if (_fd_for_heap == -1) {
 613       vm_exit_during_initialization(
 614         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 615     }
 616   }
 617 
 618   // Heap size should be aligned to alignment, too.
 619   guarantee(is_aligned(size, alignment), "set by caller");
 620 
 621   if (UseCompressedOops) {
 622     initialize_compressed_heap(size, alignment, large);
 623     if (_size > size) {
 624       // We allocated heap with noaccess prefix.
 625       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 626       // if we had to try at arbitrary address.
 627       establish_noaccess_prefix();
 628     }
 629   } else {
 630     initialize(size, alignment, large, NULL, false);
 631   }
 632 
 633   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 634          "area must be distinguishable from marks for mark-sweep");
 635   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 636          "area must be distinguishable from marks for mark-sweep");
 637 
 638   if (base() != NULL) {
 639     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 640   }
 641 
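       // The backing-file descriptor can be closed below once the reservation exists;
       // the established mapping is assumed to keep the backing file contents
       // accessible (POSIX mmap semantics; analogous behavior is assumed elsewhere).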
 642   if (_fd_for_heap != -1) {
 643     os::close(_fd_for_heap);
 644   }
 645 }
 646 
 647 // Reserve space for the code segment.  Same as the Java heap, except we mark this as
 648 // executable.
 649 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 650                                      size_t rs_align,
 651                                      bool large) :
 652   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 653   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 654 }
 655 
 656 // VirtualSpace
 657 
 658 VirtualSpace::VirtualSpace() {
 659   _low_boundary           = NULL;
 660   _high_boundary          = NULL;
 661   _low                    = NULL;
 662   _high                   = NULL;
 663   _lower_high             = NULL;
 664   _middle_high            = NULL;
 665   _upper_high             = NULL;
 666   _lower_high_boundary    = NULL;
 667   _middle_high_boundary   = NULL;
 668   _upper_high_boundary    = NULL;
 669   _lower_alignment        = 0;
 670   _middle_alignment       = 0;
 671   _upper_alignment        = 0;
 672   _special                = false;
 673   _executable             = false;
 674 }
 675 
 676 
 677 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 678   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 679   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 680 }
 681 
 682 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 683   if (!rs.is_reserved()) return false;  // Allocation failed.
 684   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 685   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 686 
 687   _low_boundary  = rs.base();
 688   _high_boundary = low_boundary() + rs.size();
 689 
 690   _low = low_boundary();
 691   _high = low();
 692 
 693   _special = rs.special();
 694   _executable = rs.executable();
 695 
 696   // When a VirtualSpace begins life at a large size, make all future expansion
 697   // and shrinking occur aligned to a granularity of large pages.  This avoids
 698   // fragmentation of physical addresses that inhibits the use of large pages
 699   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 700   // page size, the only spaces that get handled this way are codecache and
 701   // the heap itself, both of which provide a substantial performance
 702   // boost in many benchmarks when covered by large pages.
 703   //
 704   // No attempt is made to force large page alignment at the very top and
 705   // bottom of the space if they are not aligned so already.
 706   _lower_alignment  = os::vm_page_size();
 707   _middle_alignment = max_commit_granularity;
 708   _upper_alignment  = os::vm_page_size();
 709 
 710   // End of each region
 711   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 712   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 713   _upper_high_boundary = high_boundary();
 714 
 715   // High address of each region
 716   _lower_high = low_boundary();
 717   _middle_high = lower_high_boundary();
 718   _upper_high = middle_high_boundary();
 719 
 720   // commit to initial size
 721   if (committed_size > 0) {
 722     if (!expand_by(committed_size)) {
 723       return false;
 724     }
 725   }
 726   return true;
 727 }
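     // Illustrative pairing of ReservedSpace and VirtualSpace (a sketch with
     // hypothetical sizes, mirroring the unit tests at the end of this file):
     //   ReservedSpace rs(4 * M);          // reserve address space only
     //   VirtualSpace vs;
     //   if (vs.initialize(rs, 0)) {       // no memory committed yet
     //     vs.expand_by(1 * M, false);     // commit the first megabyte
     //     vs.shrink_by(512 * K);          // uncommit half of it again
     //   }
     //   rs.release();                     // VirtualSpace::release() does not do this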
 728 
 729 
 730 VirtualSpace::~VirtualSpace() {
 731   release();
 732 }
 733 
 734 
 735 void VirtualSpace::release() {
 736   // This does not release the underlying reserved memory.
 737   // The caller must release it via rs.release().
 738   _low_boundary           = NULL;
 739   _high_boundary          = NULL;
 740   _low                    = NULL;
 741   _high                   = NULL;
 742   _lower_high             = NULL;
 743   _middle_high            = NULL;
 744   _upper_high             = NULL;
 745   _lower_high_boundary    = NULL;
 746   _middle_high_boundary   = NULL;
 747   _upper_high_boundary    = NULL;
 748   _lower_alignment        = 0;
 749   _middle_alignment       = 0;
 750   _upper_alignment        = 0;
 751   _special                = false;
 752   _executable             = false;
 753 }
 754 
 755 
 756 size_t VirtualSpace::committed_size() const {
 757   return pointer_delta(high(), low(), sizeof(char));
 758 }
 759 
 760 
 761 size_t VirtualSpace::reserved_size() const {
 762   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 763 }
 764 
 765 
 766 size_t VirtualSpace::uncommitted_size()  const {
 767   return reserved_size() - committed_size();
 768 }
 769 
 770 size_t VirtualSpace::actual_committed_size() const {
 771   // Special VirtualSpaces commit all reserved space up front.
 772   if (special()) {
 773     return reserved_size();
 774   }
 775 
 776   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 777   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 778   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 779 
 780 #ifdef ASSERT
 781   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 782   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 783   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 784 
 785   if (committed_high > 0) {
 786     assert(committed_low == lower, "Must be");
 787     assert(committed_middle == middle, "Must be");
 788   }
 789 
 790   if (committed_middle > 0) {
 791     assert(committed_low == lower, "Must be");
 792   }
 793   if (committed_middle < middle) {
 794     assert(committed_high == 0, "Must be");
 795   }
 796 
 797   if (committed_low < lower) {
 798     assert(committed_high == 0, "Must be");
 799     assert(committed_middle == 0, "Must be");
 800   }
 801 #endif
 802 
 803   return committed_low + committed_middle + committed_high;
 804 }
 805 
 806 
 807 bool VirtualSpace::contains(const void* p) const {
 808   return low() <= (const char*) p && (const char*) p < high();
 809 }
 810 
 811 static void pretouch_expanded_memory(void* start, void* end) {
 812   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 813   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 814 
 815   os::pretouch_memory(start, end);
 816 }
 817 
 818 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 819   if (os::commit_memory(start, size, alignment, executable)) {
 820     if (pre_touch || AlwaysPreTouch) {
 821       pretouch_expanded_memory(start, start + size);
 822     }
 823     return true;
 824   }
 825 
 826   debug_only(warning(
 827       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 828       " size=" SIZE_FORMAT ", executable=%d) failed",
 829       p2i(start), p2i(start + size), size, executable);)
 830 
 831   return false;
 832 }
 833 
 834 /*
 835    First we need to determine whether a particular virtual space is using large
 836    pages.  This is done in the initialize function, and only virtual spaces
 837    that are larger than LargePageSizeInBytes use large pages.  Once we
 838    have determined this, all expand_by and shrink_by calls must grow and
 839    shrink by large page size chunks.  If a particular request
 840    is within the current large page, the call to commit and uncommit memory
 841    can be ignored.  If the low and high boundaries of this
 842    space are not large page aligned, the pages leading up to the first large
 843    page address and the pages after the last large page address must be
 844    allocated with default pages.
 845 */
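     // Illustrative example of the three regions described above (hypothetical
     // values): for a reservation at base 1M of size 10M with a 4M middle (large
     // page) alignment, the lower region is [1M, 4M), the middle region is [4M, 8M)
     // and the upper region is [8M, 11M).  expand_by() and shrink_by() commit and
     // uncommit the lower and upper regions with small pages and the middle region
     // with large pages.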
 846 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 847   if (uncommitted_size() < bytes) {
 848     return false;
 849   }
 850 
 851   if (special()) {
 852     // don't commit memory if the entire space is pinned in memory
 853     _high += bytes;
 854     return true;
 855   }
 856 
 857   char* previous_high = high();
 858   char* unaligned_new_high = high() + bytes;
 859   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 860 
 861   // Calculate where the new high for each of the regions should be.  If
 862   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 863   // then the unaligned lower and upper new highs would be the
 864   // lower_high() and upper_high() respectively.
 865   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 866   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 867   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 868 
 869   // Align the new highs based on the region's alignment.  lower and upper
 870   // alignment will always be default page size.  middle alignment will be
 871   // LargePageSizeInBytes if the actual size of the virtual space is in
 872   // fact larger than LargePageSizeInBytes.
 873   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 874   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 875   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 876 
 877   // Determine which regions need to grow in this expand_by call.
 878   // If you are growing in the lower region, high() must be in that
 879   // region so calculate the size based on high().  For the middle and
 880   // upper regions, determine the starting point of growth based on the
 881   // location of high().  By getting the MAX of the region's low address
 882   // (or the previous region's high address) and high(), we can tell if it
 883   // is an intra or inter region growth.
 884   size_t lower_needs = 0;
 885   if (aligned_lower_new_high > lower_high()) {
 886     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 887   }
 888   size_t middle_needs = 0;
 889   if (aligned_middle_new_high > middle_high()) {
 890     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 891   }
 892   size_t upper_needs = 0;
 893   if (aligned_upper_new_high > upper_high()) {
 894     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 895   }
 896 
 897   // Check contiguity.
 898   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 899          "high address must be contained within the region");
 900   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 901          "high address must be contained within the region");
 902   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 903          "high address must be contained within the region");
 904 
 905   // Commit regions
 906   if (lower_needs > 0) {
 907     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 908     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 909       return false;
 910     }
 911     _lower_high += lower_needs;
 912   }
 913 
 914   if (middle_needs > 0) {
 915     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 916     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 917       return false;
 918     }
 919     _middle_high += middle_needs;
 920   }
 921 
 922   if (upper_needs > 0) {
 923     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 924     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 925       return false;
 926     }
 927     _upper_high += upper_needs;
 928   }
 929 
 930   _high += bytes;
 931   return true;
 932 }
 933 
 934 // A page is uncommitted if the contents of the entire page are deemed unusable.
 935 // Continue to decrement the high() pointer until it reaches a page boundary,
 936 // at which point that particular page can now be uncommitted.
 937 void VirtualSpace::shrink_by(size_t size) {
 938   if (committed_size() < size)
 939     fatal("Cannot shrink virtual space to negative size");
 940 
 941   if (special()) {
 942     // don't uncommit if the entire space is pinned in memory
 943     _high -= size;
 944     return;
 945   }
 946 
 947   char* unaligned_new_high = high() - size;
 948   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 949 
 950   // Calculate new unaligned address
 951   char* unaligned_upper_new_high =
 952     MAX2(unaligned_new_high, middle_high_boundary());
 953   char* unaligned_middle_new_high =
 954     MAX2(unaligned_new_high, lower_high_boundary());
 955   char* unaligned_lower_new_high =
 956     MAX2(unaligned_new_high, low_boundary());
 957 
 958   // Align address to region's alignment
 959   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 960   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 961   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 962 
 963   // Determine which regions need to shrink
 964   size_t upper_needs = 0;
 965   if (aligned_upper_new_high < upper_high()) {
 966     upper_needs =
 967       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 968   }
 969   size_t middle_needs = 0;
 970   if (aligned_middle_new_high < middle_high()) {
 971     middle_needs =
 972       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 973   }
 974   size_t lower_needs = 0;
 975   if (aligned_lower_new_high < lower_high()) {
 976     lower_needs =
 977       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 978   }
 979 
 980   // Check contiguity.
 981   assert(middle_high_boundary() <= upper_high() &&
 982          upper_high() <= upper_high_boundary(),
 983          "high address must be contained within the region");
 984   assert(lower_high_boundary() <= middle_high() &&
 985          middle_high() <= middle_high_boundary(),
 986          "high address must be contained within the region");
 987   assert(low_boundary() <= lower_high() &&
 988          lower_high() <= lower_high_boundary(),
 989          "high address must be contained within the region");
 990 
 991   // Uncommit
 992   if (upper_needs > 0) {
 993     assert(middle_high_boundary() <= aligned_upper_new_high &&
 994            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 995            "must not shrink beyond region");
 996     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 997       debug_only(warning("os::uncommit_memory failed"));
 998       return;
 999     } else {
1000       _upper_high -= upper_needs;
1001     }
1002   }
1003   if (middle_needs > 0) {
1004     assert(lower_high_boundary() <= aligned_middle_new_high &&
1005            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1006            "must not shrink beyond region");
1007     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1008       debug_only(warning("os::uncommit_memory failed"));
1009       return;
1010     } else {
1011       _middle_high -= middle_needs;
1012     }
1013   }
1014   if (lower_needs > 0) {
1015     assert(low_boundary() <= aligned_lower_new_high &&
1016            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1017            "must not shrink beyond region");
1018     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1019       debug_only(warning("os::uncommit_memory failed"));
1020       return;
1021     } else {
1022       _lower_high -= lower_needs;
1023     }
1024   }
1025 
1026   _high -= size;
1027 }
1028 
1029 #ifndef PRODUCT
1030 void VirtualSpace::check_for_contiguity() {
1031   // Check contiguity.
1032   assert(low_boundary() <= lower_high() &&
1033          lower_high() <= lower_high_boundary(),
1034          "high address must be contained within the region");
1035   assert(lower_high_boundary() <= middle_high() &&
1036          middle_high() <= middle_high_boundary(),
1037          "high address must be contained within the region");
1038   assert(middle_high_boundary() <= upper_high() &&
1039          upper_high() <= upper_high_boundary(),
1040          "high address must be contained within the region");
1041   assert(low() >= low_boundary(), "low");
1042   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1043   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1044   assert(high() <= upper_high(), "upper high");
1045 }
1046 
1047 void VirtualSpace::print_on(outputStream* out) {
1048   out->print   ("Virtual space:");
1049   if (special()) out->print(" (pinned in memory)");
1050   out->cr();
1051   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1052   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1053   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1054   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1055 }
1056 
1057 void VirtualSpace::print() {
1058   print_on(tty);
1059 }
1060 
1061 /////////////// Unit tests ///////////////
1062 
1063 #ifndef PRODUCT
1064 
1065 #define test_log(...) \
1066   do {\
1067     if (VerboseInternalVMTests) { \
1068       tty->print_cr(__VA_ARGS__); \
1069       tty->flush(); \
1070     }\
1071   } while (false)
1072 
1073 class TestReservedSpace : AllStatic {
1074  public:
1075   static void small_page_write(void* addr, size_t size) {
1076     size_t page_size = os::vm_page_size();
1077 
1078     char* end = (char*)addr + size;
1079     for (char* p = (char*)addr; p < end; p += page_size) {
1080       *p = 1;
1081     }
1082   }
1083 
1084   static void release_memory_for_test(ReservedSpace rs) {
1085     if (rs.special()) {
1086       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1087     } else {
1088       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1089     }
1090   }
1091 
1092   static void test_reserved_space1(size_t size, size_t alignment) {
1093     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1094 
1095     assert(is_aligned(size, alignment), "Incorrect input parameters");
1096 
1097     ReservedSpace rs(size,          // size
1098                      alignment,     // alignment
1099                      UseLargePages, // large
1100                      (char *)NULL); // requested_address
1101 
1102     test_log(" rs.special() == %d", rs.special());
1103 
1104     assert(rs.base() != NULL, "Must be");
1105     assert(rs.size() == size, "Must be");
1106 
1107     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1108     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1109 
1110     if (rs.special()) {
1111       small_page_write(rs.base(), size);
1112     }
1113 
1114     release_memory_for_test(rs);
1115   }
1116 
1117   static void test_reserved_space2(size_t size) {
1118     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1119 
1120     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1121 
1122     ReservedSpace rs(size);
1123 
1124     test_log(" rs.special() == %d", rs.special());
1125 
1126     assert(rs.base() != NULL, "Must be");
1127     assert(rs.size() == size, "Must be");
1128 
1129     if (rs.special()) {
1130       small_page_write(rs.base(), size);
1131     }
1132 
1133     release_memory_for_test(rs);
1134   }
1135 
1136   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1137     test_log("test_reserved_space3(%p, %p, %d)",
1138         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1139 
1140     if (size < alignment) {
1141       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1142       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1143       return;
1144     }
1145 
1146     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1147     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1148 
1149     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1150 
1151     ReservedSpace rs(size, alignment, large, false);
1152 
1153     test_log(" rs.special() == %d", rs.special());
1154 
1155     assert(rs.base() != NULL, "Must be");
1156     assert(rs.size() == size, "Must be");
1157 
1158     if (rs.special()) {
1159       small_page_write(rs.base(), size);
1160     }
1161 
1162     release_memory_for_test(rs);
1163   }
1164 
1165 
1166   static void test_reserved_space1() {
1167     size_t size = 2 * 1024 * 1024;
1168     size_t ag   = os::vm_allocation_granularity();
1169 
1170     test_reserved_space1(size,      ag);
1171     test_reserved_space1(size * 2,  ag);
1172     test_reserved_space1(size * 10, ag);
1173   }
1174 
1175   static void test_reserved_space2() {
1176     size_t size = 2 * 1024 * 1024;
1177     size_t ag = os::vm_allocation_granularity();
1178 
1179     test_reserved_space2(size * 1);
1180     test_reserved_space2(size * 2);
1181     test_reserved_space2(size * 10);
1182     test_reserved_space2(ag);
1183     test_reserved_space2(size - ag);
1184     test_reserved_space2(size);
1185     test_reserved_space2(size + ag);
1186     test_reserved_space2(size * 2);
1187     test_reserved_space2(size * 2 - ag);
1188     test_reserved_space2(size * 2 + ag);
1189     test_reserved_space2(size * 3);
1190     test_reserved_space2(size * 3 - ag);
1191     test_reserved_space2(size * 3 + ag);
1192     test_reserved_space2(size * 10);
1193     test_reserved_space2(size * 10 + size / 2);
1194   }
1195 
1196   static void test_reserved_space3() {
1197     size_t ag = os::vm_allocation_granularity();
1198 
1199     test_reserved_space3(ag,      ag    , false);
1200     test_reserved_space3(ag * 2,  ag    , false);
1201     test_reserved_space3(ag * 3,  ag    , false);
1202     test_reserved_space3(ag * 2,  ag * 2, false);
1203     test_reserved_space3(ag * 4,  ag * 2, false);
1204     test_reserved_space3(ag * 8,  ag * 2, false);
1205     test_reserved_space3(ag * 4,  ag * 4, false);
1206     test_reserved_space3(ag * 8,  ag * 4, false);
1207     test_reserved_space3(ag * 16, ag * 4, false);
1208 
1209     if (UseLargePages) {
1210       size_t lp = os::large_page_size();
1211 
1212       // Without large pages
1213       test_reserved_space3(lp,     ag * 4, false);
1214       test_reserved_space3(lp * 2, ag * 4, false);
1215       test_reserved_space3(lp * 4, ag * 4, false);
1216       test_reserved_space3(lp,     lp    , false);
1217       test_reserved_space3(lp * 2, lp    , false);
1218       test_reserved_space3(lp * 3, lp    , false);
1219       test_reserved_space3(lp * 2, lp * 2, false);
1220       test_reserved_space3(lp * 4, lp * 2, false);
1221       test_reserved_space3(lp * 8, lp * 2, false);
1222 
1223       // With large pages
1224       test_reserved_space3(lp, ag * 4    , true);
1225       test_reserved_space3(lp * 2, ag * 4, true);
1226       test_reserved_space3(lp * 4, ag * 4, true);
1227       test_reserved_space3(lp, lp        , true);
1228       test_reserved_space3(lp * 2, lp    , true);
1229       test_reserved_space3(lp * 3, lp    , true);
1230       test_reserved_space3(lp * 2, lp * 2, true);
1231       test_reserved_space3(lp * 4, lp * 2, true);
1232       test_reserved_space3(lp * 8, lp * 2, true);
1233     }
1234   }
1235 
1236   static void test_reserved_space() {
1237     test_reserved_space1();
1238     test_reserved_space2();
1239     test_reserved_space3();
1240   }
1241 };
1242 
1243 void TestReservedSpace_test() {
1244   TestReservedSpace::test_reserved_space();
1245 }
1246 
1247 #define assert_equals(actual, expected)  \
1248   assert(actual == expected,             \
1249          "Got " SIZE_FORMAT " expected " \
1250          SIZE_FORMAT, actual, expected);
1251 
1252 #define assert_ge(value1, value2)                  \
1253   assert(value1 >= value2,                         \
1254          "'" #value1 "': " SIZE_FORMAT " '"        \
1255          #value2 "': " SIZE_FORMAT, value1, value2);
1256 
1257 #define assert_lt(value1, value2)                  \
1258   assert(value1 < value2,                          \
1259          "'" #value1 "': " SIZE_FORMAT " '"        \
1260          #value2 "': " SIZE_FORMAT, value1, value2);
1261 
1262 
1263 class TestVirtualSpace : AllStatic {
1264   enum TestLargePages {
1265     Default,
1266     Disable,
1267     Reserve,
1268     Commit
1269   };
1270 
1271   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1272     switch(mode) {
1273     default:
1274     case Default:
1275     case Reserve:
1276       return ReservedSpace(reserve_size_aligned);
1277     case Disable:
1278     case Commit:
1279       return ReservedSpace(reserve_size_aligned,
1280                            os::vm_allocation_granularity(),
1281                            /* large */ false, /* exec */ false);
1282     }
1283   }
1284 
1285   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1286     switch(mode) {
1287     default:
1288     case Default:
1289     case Reserve:
1290       return vs.initialize(rs, 0);
1291     case Disable:
1292       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1293     case Commit:
1294       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1295     }
1296   }
1297 
1298  public:
1299   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1300                                                         TestLargePages mode = Default) {
1301     size_t granularity = os::vm_allocation_granularity();
1302     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1303 
1304     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1305 
1306     assert(reserved.is_reserved(), "Must be");
1307 
1308     VirtualSpace vs;
1309     bool initialized = initialize_virtual_space(vs, reserved, mode);
1310     assert(initialized, "Failed to initialize VirtualSpace");
1311 
1312     vs.expand_by(commit_size, false);
1313 
1314     if (vs.special()) {
1315       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1316     } else {
1317       assert_ge(vs.actual_committed_size(), commit_size);
1318       // Approximate the commit granularity.
1319       // Make sure that we don't commit using large pages
1320       // if large pages have been disabled for this VirtualSpace.
1321       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1322                                    os::vm_page_size() : os::large_page_size();
1323       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1324     }
1325 
1326     reserved.release();
1327   }
1328 
1329   static void test_virtual_space_actual_committed_space_one_large_page() {
1330     if (!UseLargePages) {
1331       return;
1332     }
1333 
1334     size_t large_page_size = os::large_page_size();
1335 
1336     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1337 
1338     assert(reserved.is_reserved(), "Must be");
1339 
1340     VirtualSpace vs;
1341     bool initialized = vs.initialize(reserved, 0);
1342     assert(initialized, "Failed to initialize VirtualSpace");
1343 
1344     vs.expand_by(large_page_size, false);
1345 
1346     assert_equals(vs.actual_committed_size(), large_page_size);
1347 
1348     reserved.release();
1349   }
1350 
1351   static void test_virtual_space_actual_committed_space() {
1352     test_virtual_space_actual_committed_space(4 * K, 0);
1353     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1354     test_virtual_space_actual_committed_space(8 * K, 0);
1355     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1356     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1357     test_virtual_space_actual_committed_space(12 * K, 0);
1358     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1359     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1360     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1361     test_virtual_space_actual_committed_space(64 * K, 0);
1362     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1363     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1364     test_virtual_space_actual_committed_space(2 * M, 0);
1365     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1366     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1367     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1368     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1369     test_virtual_space_actual_committed_space(10 * M, 0);
1370     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1371     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1372     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1373     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1374     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1375     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1376   }
1377 
1378   static void test_virtual_space_disable_large_pages() {
1379     if (!UseLargePages) {
1380       return;
1381     }
1382     // These test cases verify that forcing VirtualSpace to disable large pages makes committing use small pages.
1383     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1384     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1385     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1386     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1387     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1388     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1389     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1390 
1391     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1392     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1393     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1394     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1395     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1396     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1397     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1398 
1399     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1400     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1401     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1402     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1403     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1404     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1405     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1406   }
1407 
1408   static void test_virtual_space() {
1409     test_virtual_space_actual_committed_space();
1410     test_virtual_space_actual_committed_space_one_large_page();
1411     test_virtual_space_disable_large_pages();
1412   }
1413 };
1414 
1415 void TestVirtualSpace_test() {
1416   TestVirtualSpace::test_virtual_space();
1417 }
1418 
1419 #endif // PRODUCT
1420 
1421 #endif