1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
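// Overview: ReservedSpace reserves (but does not commit) a contiguous range of
// virtual address space; ReservedHeapSpace and ReservedCodeSpace specialize it
// for the Java heap and the code cache.  VirtualSpace, further below, manages
// committing and uncommitting memory within an already reserved range.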
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  39     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  40 }
  41 
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
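  // Illustrative example: with a preferred 2M large page size and a 64K
  // allocation granularity, alignment becomes MAX2(2M, 64K) = 2M and size is
  // rounded up to the next 2M boundary before initialize() is called.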
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1) {
  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1) {
  70   initialize(size, alignment, large, NULL, executable);
  71 }
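// Illustrative usage sketch (mirrors the unit tests at the end of this file):
//   ReservedSpace rs(size, os::vm_allocation_granularity(),
//                    UseLargePages, (char*)NULL);
//   if (rs.is_reserved()) {
//     // ... use rs.base() and rs.size() ...
//     rs.release();
//   }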
  72 
  73 // Helper: file-mapped memory must be unmapped; anonymous mappings are released.
  74 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  75   if (is_file_mapped) {
  76     if (!os::unmap_memory(base, size)) {
  77       fatal("os::unmap_memory failed");
  78     }
  79   } else if (!os::release_memory(base, size)) {
  80     fatal("os::release_memory failed");
  81   }
  82 }
  83 
  84 // Helper: returns true if a specific address was requested but not obtained (any memory reserved elsewhere is released first).
  85 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  86                                            const size_t size, bool special, bool is_file_mapped = false)
  87 {
  88   if (base == requested_address || requested_address == NULL)
  89     return false; // did not fail
  90 
  91   if (base != NULL) {
  92     // A different reserved address may be acceptable in other cases,
  93     // but for compressed oops the heap should be at the requested address.
  94     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  95     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  96     // OS ignored requested address. Try different address.
  97     if (special) {
  98       if (!os::release_memory_special(base, size)) {
  99         fatal("os::release_memory_special failed");
 100       }
 101     } else {
 102       unmap_or_release_memory(base, size, is_file_mapped);
 103     }
 104   }
 105   return true;
 106 }
 107 
 108 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 109                                char* requested_address,
 110                                bool executable) {
 111   const size_t granularity = os::vm_allocation_granularity();
 112   assert((size & (granularity - 1)) == 0,
 113          "size not aligned to os::vm_allocation_granularity()");
 114   assert((alignment & (granularity - 1)) == 0,
 115          "alignment not aligned to os::vm_allocation_granularity()");
 116   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 117          "not a power of 2");
 118 
 119   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 120 
 121   _base = NULL;
 122   _size = 0;
 123   _special = false;
 124   _executable = executable;
 125   _alignment = 0;
 126   _noaccess_prefix = 0;
 127   if (size == 0) {
 128     return;
 129   }
 130 
 131   // If the OS doesn't support demand paging for large page memory, we need
 132   // to use reserve_memory_special() to reserve and pin the entire region.
 133   // If there is a backing file directory for this space, then whether large
 134   // pages are allocated is up to the file system of the backing file, so we
 135   // ignore the UseLargePages flag in this case.
 136   bool special = large && !os::can_commit_large_page_memory();
 137   if (special && _fd_for_heap != -1) {
 138     special = false;
 139     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 140       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 141       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 142     }
 143   }
 144 
 145   char* base = NULL;
 146 
 147   if (special) {
 148 
 149     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 150 
 151     if (base != NULL) {
 152       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 153         // OS ignored requested address. Try different address.
 154         return;
 155       }
 156       // Check alignment constraints.
 157       assert((uintptr_t) base % alignment == 0,
 158              "Large pages returned a non-aligned address, base: "
 159              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 160              p2i(base), alignment);
 161       _special = true;
 162     } else {
 163       // failed; try to reserve regular memory below
 164       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 165                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 166         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 167       }
 168     }
 169   }
 170 
 171   if (base == NULL) {
 172     // Optimistically assume that the OS returns an aligned base pointer.
 173     // When reserving a large address range, most OSes seem to align to at
 174     // least 64K.
 175 
 176     // If the memory was requested at a particular address, use
 177     // os::attempt_reserve_memory_at() to avoid mapping over something
 178     // important.  If available space is not detected, return NULL.
 179 
 180     if (requested_address != 0) {
 181       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 182       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 183         // OS ignored requested address. Try different address.
 184         base = NULL;
 185       }
 186     } else {
 187       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 188     }
 189 
 190     if (base == NULL) return;
 191 
 192     // Check alignment constraints
 193     if ((((size_t)base) & (alignment - 1)) != 0) {
 194       // Base not aligned, retry
 195       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 196 
 197       // Make sure that size is aligned
 198       size = align_up(size, alignment);
 199       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 200 
 201       if (requested_address != 0 &&
 202           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 203         // As a result of the alignment constraints, the allocated base differs
 204         // from the requested address. Return to the caller, who can take
 205         // remedial action (like trying again without a requested address).
 206         assert(_base == NULL, "should be");
 207         return;
 208       }
 209     }
 210   }
 211   // Done
 212   _base = base;
 213   _size = size;
 214   _alignment = alignment;
 215   // If the heap is reserved with a backing file, the entire space has already been committed, so set the _special flag to true.
 216   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 217     _special = true;
 218   }
 219 }
 220 
 221 
 222 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 223                              bool special, bool executable) {
 224   assert((size % os::vm_allocation_granularity()) == 0,
 225          "size not allocation aligned");
 226   _base = base;
 227   _size = size;
 228   _alignment = alignment;
 229   _noaccess_prefix = 0;
 230   _special = special;
 231   _executable = executable;
 232 }
 233 
 234 
 235 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 236                                         bool split, bool realloc) {
 237   assert(partition_size <= size(), "partition failed");
 238   if (split) {
 239     os::split_reserved_memory(base(), size(), partition_size, realloc);
 240   }
 241   ReservedSpace result(base(), partition_size, alignment, special(),
 242                        executable());
 243   return result;
 244 }
 245 
 246 
 247 ReservedSpace
 248 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 249   assert(partition_size <= size(), "partition failed");
 250   ReservedSpace result(base() + partition_size, size() - partition_size,
 251                        alignment, special(), executable());
 252   return result;
 253 }
 254 
 255 
 256 size_t ReservedSpace::page_align_size_up(size_t size) {
 257   return align_up(size, os::vm_page_size());
 258 }
 259 
 260 
 261 size_t ReservedSpace::page_align_size_down(size_t size) {
 262   return align_down(size, os::vm_page_size());
 263 }
 264 
 265 
 266 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 267   return align_up(size, os::vm_allocation_granularity());
 268 }
 269 
 270 
 271 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 272   return align_down(size, os::vm_allocation_granularity());
 273 }
 274 
 275 
 276 void ReservedSpace::release() {
 277   if (is_reserved()) {
 278     char *real_base = _base - _noaccess_prefix;
 279     const size_t real_size = _size + _noaccess_prefix;
 280     if (special()) {
 281       if (_fd_for_heap != -1) {
 282         os::unmap_memory(real_base, real_size);
 283       } else {
 284         os::release_memory_special(real_base, real_size);
 285       }
 286     } else {
 287       os::release_memory(real_base, real_size);
 288     }
 289     _base = NULL;
 290     _size = 0;
 291     _noaccess_prefix = 0;
 292     _alignment = 0;
 293     _special = false;
 294     _executable = false;
 295   }
 296 }
 297 
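// The noaccess prefix is the least common multiple of the page size and the
// heap alignment; for example, with 4K pages and a 2M heap alignment the
// prefix is 2M, and with 4K pages and a 4K alignment it is 4K.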
 298 static size_t noaccess_prefix_size(size_t alignment) {
 299   return lcm(os::vm_page_size(), alignment);
 300 }
 301 
 302 void ReservedHeapSpace::establish_noaccess_prefix() {
 303   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 310       // Protect memory at the base of the allocated region.
 311       // If special, the page was committed (this only matters on Windows).
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       Universe::set_narrow_oop_use_implicit_null_checks(false);
 322     }
 323   }
 324   _base += _noaccess_prefix;
 325   _size -= _noaccess_prefix;
 326   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 327 }
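// After establish_noaccess_prefix(), _base and _size describe only the usable
// part of the heap; release() adds the protected prefix back in when unmapping.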
 328 
 329 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 330 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 331 // might still fulfill the wishes of the caller.
 332 // Ensures the memory is aligned to 'alignment'.
 333 // NOTE: If the ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 334 void ReservedHeapSpace::try_reserve_heap(size_t size,
 335                                          size_t alignment,
 336                                          bool large,
 337                                          char* requested_address) {
 338   if (_base != NULL) {
 339     // We tried before, but we didn't like the address delivered.
 340     release();
 341   }
 342 
 343   // If the OS doesn't support demand paging for large page memory, we need
 344   // to use reserve_memory_special() to reserve and pin the entire region.
 345   // If there is a backing file directory for this space, then whether large
 346   // pages are allocated is up to the file system of the backing file, so we
 347   // ignore the UseLargePages flag in this case.
 348   bool special = large && !os::can_commit_large_page_memory();
 349   if (special && _fd_for_heap != -1) {
 350     special = false;
 351     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 352                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 353       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 354     }
 355   }
 356   char* base = NULL;
 357 
 358   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 359                              " heap of size " SIZE_FORMAT_HEX,
 360                              p2i(requested_address),
 361                              size);
 362 
 363   if (special) {
 364     base = os::reserve_memory_special(size, alignment, requested_address, false);
 365 
 366     if (base != NULL) {
 367       // Check alignment constraints.
 368       assert((uintptr_t) base % alignment == 0,
 369              "Large pages returned a non-aligned address, base: "
 370              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 371              p2i(base), alignment);
 372       _special = true;
 373     }
 374   }
 375 
 376   if (base == NULL) {
 377     // Failed; try to reserve regular memory below
 378     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 379                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 380       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 381     }
 382 
 383     // Optimistically assume that the OS returns an aligned base pointer.
 384     // When reserving a large address range, most OSes seem to align to at
 385     // least 64K.
 386 
 387     // If the memory was requested at a particular address, use
 388     // os::attempt_reserve_memory_at() to avoid mapping over something
 389     // important.  If available space is not detected, return NULL.
 390 
 391     if (requested_address != 0) {
 392       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 393     } else {
 394       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 395     }
 396   }
 397   if (base == NULL) { return; }
 398 
 399   // Done
 400   _base = base;
 401   _size = size;
 402   _alignment = alignment;
 403 
 404   // If the heap is reserved with a backing file, the entire space has already been committed, so set the _special flag to true.
 405   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 406     _special = true;
 407   }
 408 
 409   // Check alignment constraints
 410   if ((((size_t)base) & (alignment - 1)) != 0) {
 411     // Base not aligned, retry.
 412     release();
 413   }
 414 }
 415 
 416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 417                                           char *lowest_start,
 418                                           size_t attach_point_alignment,
 419                                           char *aligned_heap_base_min_address,
 420                                           char *upper_bound,
 421                                           size_t size,
 422                                           size_t alignment,
 423                                           bool large) {
 424   const size_t attach_range = highest_start - lowest_start;
 425   // Cap the number of attempts at the number that is actually possible.
 426   // At least one attempt is possible even for a zero-sized attach range.
 427   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 428   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 429 
 430   const size_t stepsize = (attach_range == 0) ? // Only one try.
 431     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
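  // The step size is chosen so that roughly HeapSearchSteps attach points,
  // each aligned to attach_point_alignment, are probed from the top of the
  // range down to lowest_start.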
 432 
 433   // Try attach points from top to bottom.
 434   char* attach_point = highest_start;
 435   while (attach_point >= lowest_start  &&
 436          attach_point <= highest_start &&  // Avoid wrap around.
 437          ((_base == NULL) ||
 438           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 439     try_reserve_heap(size, alignment, large, attach_point);
 440     attach_point -= stepsize;
 441   }
 442 }
 443 
 444 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 445 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 446 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
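// Note: with the default 8-byte object alignment, OopEncodingHeapMax is 32G
// (4G << 3), so the attach addresses below are multiples of OopEncodingHeapMax,
// which is what disjoint base mode requires.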
 447 
 448 // Helper for heap allocation. Returns an array with addresses
 449 // (OS-specific) which are suited for disjoint base mode. Array is
 450 // NULL terminated.
 451 static char** get_attach_addresses_for_disjoint_mode() {
 452   static uint64_t addresses[] = {
 453      2 * SIZE_32G,
 454      3 * SIZE_32G,
 455      4 * SIZE_32G,
 456      8 * SIZE_32G,
 457     10 * SIZE_32G,
 458      1 * SIZE_64K * SIZE_32G,
 459      2 * SIZE_64K * SIZE_32G,
 460      3 * SIZE_64K * SIZE_32G,
 461      4 * SIZE_64K * SIZE_32G,
 462     16 * SIZE_64K * SIZE_32G,
 463     32 * SIZE_64K * SIZE_32G,
 464     34 * SIZE_64K * SIZE_32G,
 465     0
 466   };
 467 
 468   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 469   // assumes the array is sorted in ascending order.
 470   uint i = 0;
 471   while (addresses[i] != 0 &&
 472          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 473     i++;
 474   }
 475   uint start = i;
 476 
 477   // Avoid more steps than requested.
 478   i = 0;
 479   while (addresses[start+i] != 0) {
 480     if (i == HeapSearchSteps) {
 481       addresses[start+i] = 0;
 482       break;
 483     }
 484     i++;
 485   }
 486 
 487   return (char**) &addresses[start];
 488 }
 489 
 490 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 491   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 492             "can not allocate compressed oop heap for this size");
 493   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 494   assert(HeapBaseMinAddress > 0, "sanity");
 495 
 496   const size_t granularity = os::vm_allocation_granularity();
 497   assert((size & (granularity - 1)) == 0,
 498          "size not aligned to os::vm_allocation_granularity()");
 499   assert((alignment & (granularity - 1)) == 0,
 500          "alignment not aligned to os::vm_allocation_granularity()");
 501   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 502          "not a power of 2");
 503 
 504   // The necessary attach point alignment for generated wish addresses.
 505   // This is needed to increase the chance of attaching for mmap and shmat.
 506   const size_t os_attach_point_alignment =
 507     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 508     NOT_AIX(os::vm_allocation_granularity());
 509   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 510 
 511   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 512   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 513     noaccess_prefix_size(alignment) : 0;
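  // A noaccess prefix is only needed if the heap may extend above
  // OopEncodingHeapMax: a heap that fits below it can use a NULL narrow-oop
  // base, and implicit null checks then rely on the regular page protected at
  // address zero.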
 514 
 515   // Attempt to allocate at the user-given address.
 516   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 517     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 518     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 519       release();
 520     }
 521   }
 522 
 523   // Keep heap at HeapBaseMinAddress.
 524   if (_base == NULL) {
 525 
 526     // Try to allocate the heap at addresses that allow efficient oop compression.
 527     // Different schemes are tried, in order of decreasing optimization potential.
 528     //
 529     // For this, try_reserve_heap() is called with the desired heap base addresses.
 530     // A call into the os layer to allocate at a given address can return memory
 531     // at a different address than requested.  Still, this might be memory at a useful
 532     // address. try_reserve_heap() always keeps the allocated memory, as the
 533     // criteria for a good heap are only checked here.
 534 
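    // The placement strategies tried below, in order of decreasing benefit:
    //   1) unscaled:      heap end <= UnscaledOopHeapMax; oops are used as addresses directly.
    //   2) zero-based:    heap end <= OopEncodingHeapMax; decode is (narrow oop << shift).
    //   3) disjoint base: heap base aligned to OopEncodingHeapMax.
    //   4) arbitrary:     decode needs both base and shift; a noaccess prefix is used.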
 535     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 536     // Give it several tries from top of range to bottom.
 537     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 538 
 539       // Calculate the address range within which we try to attach (range of possible start addresses).
 540       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 541       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 542       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 543                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 544     }
 545 
 546     // Zero-based: attempt to allocate in the lower 32G.
 547     // But leave room for the compressed class space, which is allocated above
 548     // the heap.
 549     char *zerobased_max = (char *)OopEncodingHeapMax;
 550     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 551     // For small heaps, save some space for compressed class pointer
 552     // space so it can be decoded with no base.
 553     if (UseCompressedClassPointers && !UseSharedSpaces &&
 554         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 555         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 556       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 557     }
 558 
 559     // Give it several tries from top of range to bottom.
 560     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zero-based theoretically possible.
 561         ((_base == NULL) ||                        // No previous try succeeded.
 562          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 563 
 564       // Calculate the address range within which we try to attach (range of possible start addresses).
 565       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 566       // Need to be careful because size is not guaranteed to be less than
 567       // UnscaledOopHeapMax, so the subtraction below can wrap around.
 568       char *lowest_start = aligned_heap_base_min_address;
 569       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 570       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 571         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 572       }
 573       lowest_start = align_up(lowest_start, attach_point_alignment);
 574       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 575                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 576     }
 577 
 578     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 579     // implement null checks.
 580     noaccess_prefix = noaccess_prefix_size(alignment);
 581 
 582     // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint base mode).
 583     char** addresses = get_attach_addresses_for_disjoint_mode();
 584     int i = 0;
 585     while (addresses[i] &&                                 // End of array not yet reached.
 586            ((_base == NULL) ||                             // No previous try succeeded.
 587             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 588              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 589       char* const attach_point = addresses[i];
 590       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 591       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 592       i++;
 593     }
 594 
 595     // Last, desperate try: reserve without any placement hint.
 596     if (_base == NULL) {
 597       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 598       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 599     }
 600   }
 601 }
 602 
 603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 604 
 605   if (size == 0) {
 606     return;
 607   }
 608 
 609   // Open AllocateOldGenAt file
 610   if (AllocateOldGenAt != NULL) {
 611     _fd_for_heap = os::create_file_for_heap(AllocateOldGenAt);
 612     if (_fd_for_heap == -1) {
 613       vm_exit_during_initialization(
 614         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 615     }
 616     if (UseParallelOldGC) {
 617       // For ParallelOldGC, adaptive sizing picks _old_gen virtual space sizes as needed.
 618       // Allocate Xmx on the NVDIMM, as adaptive sizing may put a lot of pressure on the NVDIMM.
 619       os::allocate_file(_fd_for_heap, MaxHeapSize);
 620       os::set_nvdimm_fd(_fd_for_heap);
 621       os::set_nvdimm_present(true);
 622     }
 623   } else {
 624     _fd_for_heap = -1;
 625   }
 626 
 627   if (heap_allocation_directory != NULL) {
 628     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 629     if (_fd_for_heap == -1) {
 630       vm_exit_during_initialization(
 631         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 632     }
 633   }
 634 
 635   // Heap size should be aligned to alignment, too.
 636   guarantee(is_aligned(size, alignment), "set by caller");
 637 
 638   if (UseCompressedOops) {
 639     initialize_compressed_heap(size, alignment, large);
 640     if (_size > size) {
 641       // We allocated the heap with a noaccess prefix.
 642       // It can happen that we get a zero-based/unscaled heap with a noaccess
 643       // prefix if we had to try an arbitrary address.
 644       establish_noaccess_prefix();
 645     }
 646   } else {
 647     initialize(size, alignment, large, NULL, false);
 648   }
 649 
 650   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 651          "area must be distinguishable from marks for mark-sweep");
 652   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 653          "area must be distinguishable from marks for mark-sweep");
 654 
 655   if (base() != NULL) {
 656     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 657     if (AllocateOldGenAt != NULL && _fd_for_heap != -1) {
 658       os::set_nvdimm_present(true);
 659       os::set_nvdimm_heapbase((address)_base);
 660       os::set_nvdimm_fd(_fd_for_heap);
 661     }
 662   }
 663 
 664   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 665     os::close(_fd_for_heap);
 666   }
 667 }
 668 
 669 // Reserve space for the code segment.  Same as the Java heap, except we mark
 670 // this as executable.
 671 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 672                                      size_t rs_align,
 673                                      bool large) :
 674   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 675   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 676 }
 677 
 678 // VirtualSpace
 679 
 680 VirtualSpace::VirtualSpace() {
 681   _low_boundary           = NULL;
 682   _high_boundary          = NULL;
 683   _low                    = NULL;
 684   _high                   = NULL;
 685   _lower_high             = NULL;
 686   _middle_high            = NULL;
 687   _upper_high             = NULL;
 688   _lower_high_boundary    = NULL;
 689   _middle_high_boundary   = NULL;
 690   _upper_high_boundary    = NULL;
 691   _lower_alignment        = 0;
 692   _middle_alignment       = 0;
 693   _upper_alignment        = 0;
 694   _special                = false;
 695   _executable             = false;
 696 }
 697 
 698 
 699 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 700   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 701   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 702 }
 703 
 704 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 705   if (!rs.is_reserved()) return false;  // allocation failed.
 706   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 707   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 708 
 709   _low_boundary  = rs.base();
 710   _high_boundary = low_boundary() + rs.size();
 711 
 712   _low = low_boundary();
 713   _high = low();
 714 
 715   _special = rs.special();
 716   _executable = rs.executable();
 717 
 718   // When a VirtualSpace begins life at a large size, make all future expansion
 719   // and shrinking occur aligned to a granularity of large pages.  This avoids
 720   // fragmentation of physical addresses that inhibits the use of large pages
 721   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 722   // page size, the only spaces that get handled this way are codecache and
 723   // the heap itself, both of which provide a substantial performance
 724   // boost in many benchmarks when covered by large pages.
 725   //
 726   // No attempt is made to force large page alignment at the very top and
 727   // bottom of the space if they are not aligned so already.
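  // The space is split into three consecutive regions for commit tracking:
  //   [low_boundary,         lower_high_boundary)  - committed with small pages
  //   [lower_high_boundary,  middle_high_boundary) - committed with pages of up
  //                                                  to max_commit_granularity
  //   [middle_high_boundary, high_boundary)        - committed with small pages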
 728   _lower_alignment  = os::vm_page_size();
 729   _middle_alignment = max_commit_granularity;
 730   _upper_alignment  = os::vm_page_size();
 731 
 732   // End of each region
 733   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 734   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 735   _upper_high_boundary = high_boundary();
 736 
 737   // High address of each region
 738   _lower_high = low_boundary();
 739   _middle_high = lower_high_boundary();
 740   _upper_high = middle_high_boundary();
 741 
 742   // commit to initial size
 743   if (committed_size > 0) {
 744     if (!expand_by(committed_size)) {
 745       return false;
 746     }
 747   }
 748   return true;
 749 }
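// Illustrative usage sketch (mirrors the unit tests at the end of this file):
//   VirtualSpace vs;
//   if (vs.initialize(rs, 0)) {       // rs is a previously reserved ReservedSpace
//     vs.expand_by(commit_size, false);
//     // ... vs.committed_size(), vs.shrink_by(...), ...
//   }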
 750 
 751 
 752 VirtualSpace::~VirtualSpace() {
 753   release();
 754 }
 755 
 756 
 757 void VirtualSpace::release() {
 758   // This does not release the underlying reserved memory.
 759   // The caller must release it via rs.release().
 760   _low_boundary           = NULL;
 761   _high_boundary          = NULL;
 762   _low                    = NULL;
 763   _high                   = NULL;
 764   _lower_high             = NULL;
 765   _middle_high            = NULL;
 766   _upper_high             = NULL;
 767   _lower_high_boundary    = NULL;
 768   _middle_high_boundary   = NULL;
 769   _upper_high_boundary    = NULL;
 770   _lower_alignment        = 0;
 771   _middle_alignment       = 0;
 772   _upper_alignment        = 0;
 773   _special                = false;
 774   _executable             = false;
 775 }
 776 
 777 
 778 size_t VirtualSpace::committed_size() const {
 779   return pointer_delta(high(), low(), sizeof(char));
 780 }
 781 
 782 
 783 size_t VirtualSpace::reserved_size() const {
 784   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 785 }
 786 
 787 
 788 size_t VirtualSpace::uncommitted_size()  const {
 789   return reserved_size() - committed_size();
 790 }
 791 
 792 size_t VirtualSpace::actual_committed_size() const {
 793   // Special VirtualSpaces commit all reserved space up front.
 794   if (special()) {
 795     return reserved_size();
 796   }
 797 
 798   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 799   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 800   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 801 
 802 #ifdef ASSERT
 803   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 804   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 805   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 806 
 807   if (committed_high > 0) {
 808     assert(committed_low == lower, "Must be");
 809     assert(committed_middle == middle, "Must be");
 810   }
 811 
 812   if (committed_middle > 0) {
 813     assert(committed_low == lower, "Must be");
 814   }
 815   if (committed_middle < middle) {
 816     assert(committed_high == 0, "Must be");
 817   }
 818 
 819   if (committed_low < lower) {
 820     assert(committed_high == 0, "Must be");
 821     assert(committed_middle == 0, "Must be");
 822   }
 823 #endif
 824 
 825   return committed_low + committed_middle + committed_high;
 826 }
 827 
 828 
 829 bool VirtualSpace::contains(const void* p) const {
 830   return low() <= (const char*) p && (const char*) p < high();
 831 }
 832 
 833 static void pretouch_expanded_memory(void* start, void* end) {
 834   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 835   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 836 
 837   os::pretouch_memory(start, end);
 838 }
 839 
 840 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 841   if (os::commit_memory(start, size, alignment, executable)) {
 842     if (pre_touch || AlwaysPreTouch) {
 843       pretouch_expanded_memory(start, start + size);
 844     }
 845     return true;
 846   }
 847 
 848   debug_only(warning(
 849       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 850       " size=" SIZE_FORMAT ", executable=%d) failed",
 851       p2i(start), p2i(start + size), size, executable);)
 852 
 853   return false;
 854 }
 855 
 856 /*
 857    First we need to determine if a particular virtual space is using large
 858    pages.  This is done in the initialize function, and only virtual spaces
 859    that are larger than LargePageSizeInBytes use large pages.  Once we
 860    have determined this, all expand_by and shrink_by calls must grow and
 861    shrink by large-page-sized chunks.  If a particular request
 862    is within the current large page, the calls to commit and uncommit memory
 863    can be ignored.  In the case that the low and high boundaries of this
 864    space are not large page aligned, the pages leading up to the first large
 865    page address and the pages after the last large page address must be
 866    allocated with default pages.
 867 */
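// For example, if the middle region uses a 2M commit granularity, expanding by
// 4K inside that region commits memory up to the next 2M boundary, and a later
// expansion that stays below that boundary does not need to commit again.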
 868 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 869   if (uncommitted_size() < bytes) {
 870     return false;
 871   }
 872 
 873   if (special()) {
 874     // don't commit memory if the entire space is pinned in memory
 875     _high += bytes;
 876     return true;
 877   }
 878 
 879   char* previous_high = high();
 880   char* unaligned_new_high = high() + bytes;
 881   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 882 
 883   // Calculate where the new high for each of the regions should be.  If
 884   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 885   // then the unaligned lower and upper new highs would be the
 886   // lower_high() and upper_high() respectively.
 887   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 888   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 889   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 890 
 891   // Align the new highs based on each region's alignment.  Lower and upper
 892   // alignment will always be the default page size.  Middle alignment will be
 893   // LargePageSizeInBytes if the actual size of the virtual space is in
 894   // fact larger than LargePageSizeInBytes.
 895   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 896   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 897   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 898 
 899   // Determine which regions need to grow in this expand_by call.
 900   // If you are growing in the lower region, high() must be in that
 901   // region so calculate the size based on high().  For the middle and
 902   // upper regions, determine the starting point of growth based on the
 903   // location of high().  By getting the MAX of the region's low address
 904   // (or the previous region's high address) and high(), we can tell if it
 905   // is an intra or inter region growth.
 906   size_t lower_needs = 0;
 907   if (aligned_lower_new_high > lower_high()) {
 908     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 909   }
 910   size_t middle_needs = 0;
 911   if (aligned_middle_new_high > middle_high()) {
 912     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 913   }
 914   size_t upper_needs = 0;
 915   if (aligned_upper_new_high > upper_high()) {
 916     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 917   }
 918 
 919   // Check contiguity.
 920   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 921          "high address must be contained within the region");
 922   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 923          "high address must be contained within the region");
 924   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 925          "high address must be contained within the region");
 926 
 927   // Commit regions
 928   if (lower_needs > 0) {
 929     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 930     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 931       return false;
 932     }
 933     _lower_high += lower_needs;
 934   }
 935 
 936   if (middle_needs > 0) {
 937     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 938     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 939       return false;
 940     }
 941     _middle_high += middle_needs;
 942   }
 943 
 944   if (upper_needs > 0) {
 945     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 946     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 947       return false;
 948     }
 949     _upper_high += upper_needs;
 950   }
 951 
 952   _high += bytes;
 953   return true;
 954 }
 955 
 956 // A page is uncommitted if the contents of the entire page are deemed unusable.
 957 // Continue to decrement the high() pointer until it reaches a page boundary,
 958 // in which case that particular page can now be uncommitted.
 959 void VirtualSpace::shrink_by(size_t size) {
 960   if (committed_size() < size)
 961     fatal("Cannot shrink virtual space to negative size");
 962 
 963   if (special()) {
 964     // don't uncommit if the entire space is pinned in memory
 965     _high -= size;
 966     return;
 967   }
 968 
 969   char* unaligned_new_high = high() - size;
 970   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 971 
 972   // Calculate new unaligned address
 973   char* unaligned_upper_new_high =
 974     MAX2(unaligned_new_high, middle_high_boundary());
 975   char* unaligned_middle_new_high =
 976     MAX2(unaligned_new_high, lower_high_boundary());
 977   char* unaligned_lower_new_high =
 978     MAX2(unaligned_new_high, low_boundary());
 979 
 980   // Align address to region's alignment
 981   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 982   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 983   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 984 
 985   // Determine which regions need to shrink
 986   size_t upper_needs = 0;
 987   if (aligned_upper_new_high < upper_high()) {
 988     upper_needs =
 989       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 990   }
 991   size_t middle_needs = 0;
 992   if (aligned_middle_new_high < middle_high()) {
 993     middle_needs =
 994       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 995   }
 996   size_t lower_needs = 0;
 997   if (aligned_lower_new_high < lower_high()) {
 998     lower_needs =
 999       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
1000   }
1001 
1002   // Check contiguity.
1003   assert(middle_high_boundary() <= upper_high() &&
1004          upper_high() <= upper_high_boundary(),
1005          "high address must be contained within the region");
1006   assert(lower_high_boundary() <= middle_high() &&
1007          middle_high() <= middle_high_boundary(),
1008          "high address must be contained within the region");
1009   assert(low_boundary() <= lower_high() &&
1010          lower_high() <= lower_high_boundary(),
1011          "high address must be contained within the region");
1012 
1013   // Uncommit
1014   if (upper_needs > 0) {
1015     assert(middle_high_boundary() <= aligned_upper_new_high &&
1016            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
1017            "must not shrink beyond region");
1018     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
1019       debug_only(warning("os::uncommit_memory failed"));
1020       return;
1021     } else {
1022       _upper_high -= upper_needs;
1023     }
1024   }
1025   if (middle_needs > 0) {
1026     assert(lower_high_boundary() <= aligned_middle_new_high &&
1027            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1028            "must not shrink beyond region");
1029     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1030       debug_only(warning("os::uncommit_memory failed"));
1031       return;
1032     } else {
1033       _middle_high -= middle_needs;
1034     }
1035   }
1036   if (lower_needs > 0) {
1037     assert(low_boundary() <= aligned_lower_new_high &&
1038            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1039            "must not shrink beyond region");
1040     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1041       debug_only(warning("os::uncommit_memory failed"));
1042       return;
1043     } else {
1044       _lower_high -= lower_needs;
1045     }
1046   }
1047 
1048   _high -= size;
1049 }
1050 
1051 #ifndef PRODUCT
1052 void VirtualSpace::check_for_contiguity() {
1053   // Check contiguity.
1054   assert(low_boundary() <= lower_high() &&
1055          lower_high() <= lower_high_boundary(),
1056          "high address must be contained within the region");
1057   assert(lower_high_boundary() <= middle_high() &&
1058          middle_high() <= middle_high_boundary(),
1059          "high address must be contained within the region");
1060   assert(middle_high_boundary() <= upper_high() &&
1061          upper_high() <= upper_high_boundary(),
1062          "high address must be contained within the region");
1063   assert(low() >= low_boundary(), "low");
1064   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1065   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1066   assert(high() <= upper_high(), "upper high");
1067 }
1068 
1069 void VirtualSpace::print_on(outputStream* out) {
1070   out->print   ("Virtual space:");
1071   if (special()) out->print(" (pinned in memory)");
1072   out->cr();
1073   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1074   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1075   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1076   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1077 }
1078 
1079 void VirtualSpace::print() {
1080   print_on(tty);
1081 }
1082 
1083 /////////////// Unit tests ///////////////
1084 
1085 #ifndef PRODUCT
1086 
1087 #define test_log(...) \
1088   do {\
1089     if (VerboseInternalVMTests) { \
1090       tty->print_cr(__VA_ARGS__); \
1091       tty->flush(); \
1092     }\
1093   } while (false)
1094 
1095 class TestReservedSpace : AllStatic {
1096  public:
1097   static void small_page_write(void* addr, size_t size) {
1098     size_t page_size = os::vm_page_size();
1099 
1100     char* end = (char*)addr + size;
1101     for (char* p = (char*)addr; p < end; p += page_size) {
1102       *p = 1;
1103     }
1104   }
1105 
1106   static void release_memory_for_test(ReservedSpace rs) {
1107     if (rs.special()) {
1108       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1109     } else {
1110       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1111     }
1112   }
1113 
1114   static void test_reserved_space1(size_t size, size_t alignment) {
1115     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1116 
1117     assert(is_aligned(size, alignment), "Incorrect input parameters");
1118 
1119     ReservedSpace rs(size,          // size
1120                      alignment,     // alignment
1121                      UseLargePages, // large
1122                      (char *)NULL); // requested_address
1123 
1124     test_log(" rs.special() == %d", rs.special());
1125 
1126     assert(rs.base() != NULL, "Must be");
1127     assert(rs.size() == size, "Must be");
1128 
1129     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1130     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1131 
1132     if (rs.special()) {
1133       small_page_write(rs.base(), size);
1134     }
1135 
1136     release_memory_for_test(rs);
1137   }
1138 
1139   static void test_reserved_space2(size_t size) {
1140     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1141 
1142     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1143 
1144     ReservedSpace rs(size);
1145 
1146     test_log(" rs.special() == %d", rs.special());
1147 
1148     assert(rs.base() != NULL, "Must be");
1149     assert(rs.size() == size, "Must be");
1150 
1151     if (rs.special()) {
1152       small_page_write(rs.base(), size);
1153     }
1154 
1155     release_memory_for_test(rs);
1156   }
1157 
1158   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1159     test_log("test_reserved_space3(%p, %p, %d)",
1160         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1161 
1162     if (size < alignment) {
1163       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1164       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1165       return;
1166     }
1167 
1168     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1169     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1170 
1171     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1172 
1173     ReservedSpace rs(size, alignment, large, false);
1174 
1175     test_log(" rs.special() == %d", rs.special());
1176 
1177     assert(rs.base() != NULL, "Must be");
1178     assert(rs.size() == size, "Must be");
1179 
1180     if (rs.special()) {
1181       small_page_write(rs.base(), size);
1182     }
1183 
1184     release_memory_for_test(rs);
1185   }
1186 
1187 
1188   static void test_reserved_space1() {
1189     size_t size = 2 * 1024 * 1024;
1190     size_t ag   = os::vm_allocation_granularity();
1191 
1192     test_reserved_space1(size,      ag);
1193     test_reserved_space1(size * 2,  ag);
1194     test_reserved_space1(size * 10, ag);
1195   }
1196 
1197   static void test_reserved_space2() {
1198     size_t size = 2 * 1024 * 1024;
1199     size_t ag = os::vm_allocation_granularity();
1200 
1201     test_reserved_space2(size * 1);
1202     test_reserved_space2(size * 2);
1203     test_reserved_space2(size * 10);
1204     test_reserved_space2(ag);
1205     test_reserved_space2(size - ag);
1206     test_reserved_space2(size);
1207     test_reserved_space2(size + ag);
1208     test_reserved_space2(size * 2);
1209     test_reserved_space2(size * 2 - ag);
1210     test_reserved_space2(size * 2 + ag);
1211     test_reserved_space2(size * 3);
1212     test_reserved_space2(size * 3 - ag);
1213     test_reserved_space2(size * 3 + ag);
1214     test_reserved_space2(size * 10);
1215     test_reserved_space2(size * 10 + size / 2);
1216   }
1217 
1218   static void test_reserved_space3() {
1219     size_t ag = os::vm_allocation_granularity();
1220 
1221     test_reserved_space3(ag,      ag    , false);
1222     test_reserved_space3(ag * 2,  ag    , false);
1223     test_reserved_space3(ag * 3,  ag    , false);
1224     test_reserved_space3(ag * 2,  ag * 2, false);
1225     test_reserved_space3(ag * 4,  ag * 2, false);
1226     test_reserved_space3(ag * 8,  ag * 2, false);
1227     test_reserved_space3(ag * 4,  ag * 4, false);
1228     test_reserved_space3(ag * 8,  ag * 4, false);
1229     test_reserved_space3(ag * 16, ag * 4, false);
1230 
1231     if (UseLargePages) {
1232       size_t lp = os::large_page_size();
1233 
1234       // Without large pages
1235       test_reserved_space3(lp,     ag * 4, false);
1236       test_reserved_space3(lp * 2, ag * 4, false);
1237       test_reserved_space3(lp * 4, ag * 4, false);
1238       test_reserved_space3(lp,     lp    , false);
1239       test_reserved_space3(lp * 2, lp    , false);
1240       test_reserved_space3(lp * 3, lp    , false);
1241       test_reserved_space3(lp * 2, lp * 2, false);
1242       test_reserved_space3(lp * 4, lp * 2, false);
1243       test_reserved_space3(lp * 8, lp * 2, false);
1244 
1245       // With large pages
1246       test_reserved_space3(lp, ag * 4    , true);
1247       test_reserved_space3(lp * 2, ag * 4, true);
1248       test_reserved_space3(lp * 4, ag * 4, true);
1249       test_reserved_space3(lp, lp        , true);
1250       test_reserved_space3(lp * 2, lp    , true);
1251       test_reserved_space3(lp * 3, lp    , true);
1252       test_reserved_space3(lp * 2, lp * 2, true);
1253       test_reserved_space3(lp * 4, lp * 2, true);
1254       test_reserved_space3(lp * 8, lp * 2, true);
1255     }
1256   }
1257 
1258   static void test_reserved_space() {
1259     test_reserved_space1();
1260     test_reserved_space2();
1261     test_reserved_space3();
1262   }
1263 };
1264 
1265 void TestReservedSpace_test() {
1266   TestReservedSpace::test_reserved_space();
1267 }
1268 
1269 #define assert_equals(actual, expected)  \
1270   assert(actual == expected,             \
1271          "Got " SIZE_FORMAT " expected " \
1272          SIZE_FORMAT, actual, expected);
1273 
1274 #define assert_ge(value1, value2)                  \
1275   assert(value1 >= value2,                         \
1276          "'" #value1 "': " SIZE_FORMAT " '"        \
1277          #value2 "': " SIZE_FORMAT, value1, value2);
1278 
1279 #define assert_lt(value1, value2)                  \
1280   assert(value1 < value2,                          \
1281          "'" #value1 "': " SIZE_FORMAT " '"        \
1282          #value2 "': " SIZE_FORMAT, value1, value2);
1283 
1284 
1285 class TestVirtualSpace : AllStatic {
1286   enum TestLargePages {
1287     Default,
1288     Disable,
1289     Reserve,
1290     Commit
1291   };
1292 
1293   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1294     switch(mode) {
1295     default:
1296     case Default:
1297     case Reserve:
1298       return ReservedSpace(reserve_size_aligned);
1299     case Disable:
1300     case Commit:
1301       return ReservedSpace(reserve_size_aligned,
1302                            os::vm_allocation_granularity(),
1303                            /* large */ false, /* exec */ false);
1304     }
1305   }
1306 
1307   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1308     switch(mode) {
1309     default:
1310     case Default:
1311     case Reserve:
1312       return vs.initialize(rs, 0);
1313     case Disable:
1314       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1315     case Commit:
1316       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1317     }
1318   }
1319 
1320  public:
1321   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1322                                                         TestLargePages mode = Default) {
1323     size_t granularity = os::vm_allocation_granularity();
1324     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1325 
1326     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1327 
1328     assert(reserved.is_reserved(), "Must be");
1329 
1330     VirtualSpace vs;
1331     bool initialized = initialize_virtual_space(vs, reserved, mode);
1332     assert(initialized, "Failed to initialize VirtualSpace");
1333 
1334     vs.expand_by(commit_size, false);
1335 
1336     if (vs.special()) {
1337       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1338     } else {
1339       assert_ge(vs.actual_committed_size(), commit_size);
1340       // Approximate the commit granularity.
1341       // Make sure that we don't commit using large pages
1342       // if large pages has been disabled for this VirtualSpace.
1343       // if large pages have been disabled for this VirtualSpace.
1344                                    os::vm_page_size() : os::large_page_size();
1345       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1346     }
1347 
1348     reserved.release();
1349   }
1350 
1351   static void test_virtual_space_actual_committed_space_one_large_page() {
1352     if (!UseLargePages) {
1353       return;
1354     }
1355 
1356     size_t large_page_size = os::large_page_size();
1357 
1358     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1359 
1360     assert(reserved.is_reserved(), "Must be");
1361 
1362     VirtualSpace vs;
1363     bool initialized = vs.initialize(reserved, 0);
1364     assert(initialized, "Failed to initialize VirtualSpace");
1365 
1366     vs.expand_by(large_page_size, false);
1367 
1368     assert_equals(vs.actual_committed_size(), large_page_size);
1369 
1370     reserved.release();
1371   }
1372 
1373   static void test_virtual_space_actual_committed_space() {
1374     test_virtual_space_actual_committed_space(4 * K, 0);
1375     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1376     test_virtual_space_actual_committed_space(8 * K, 0);
1377     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1378     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1379     test_virtual_space_actual_committed_space(12 * K, 0);
1380     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1381     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1382     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1383     test_virtual_space_actual_committed_space(64 * K, 0);
1384     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1385     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1386     test_virtual_space_actual_committed_space(2 * M, 0);
1387     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1388     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1389     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1390     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1391     test_virtual_space_actual_committed_space(10 * M, 0);
1392     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1393     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1394     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1395     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1396     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1397     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1398   }
1399 
1400   static void test_virtual_space_disable_large_pages() {
1401     if (!UseLargePages) {
1402       return;
1403     }
1404     // These test cases verify the committed size when large pages are disabled for the VirtualSpace, and with the Reserve and Commit granularity modes.
1405     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1406     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1407     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1408     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1409     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1410     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1411     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1412 
1413     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1414     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1415     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1416     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1417     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1418     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1419     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1420 
1421     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1422     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1423     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1424     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1425     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1426     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1427     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1428   }
1429 
1430   static void test_virtual_space() {
1431     test_virtual_space_actual_committed_space();
1432     test_virtual_space_actual_committed_space_one_large_page();
1433     test_virtual_space_disable_large_pages();
1434   }
1435 };
1436 
1437 void TestVirtualSpace_test() {
1438   TestVirtualSpace::test_virtual_space();
1439 }
1440 
1441 #endif // PRODUCT
1442 
1443 #endif