1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  39     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  40 }
  41 
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1) {
  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1) {
  70   initialize(size, alignment, large, NULL, executable);
  71 }
  72 
  73 // Helper method: unmaps the memory if it is file mapped, otherwise releases it.
  74 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  75   if (is_file_mapped) {
  76     if (!os::unmap_memory(base, size)) {
  77       fatal("os::unmap_memory failed");
  78     }
  79   } else if (!os::release_memory(base, size)) {
  80     fatal("os::release_memory failed");
  81   }
  82 }
  83 
  84 // Helper method: returns true if a specific address was requested but the
     // memory was not reserved there, releasing any reservation made elsewhere.
  85 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  86                                            const size_t size, bool special, bool is_file_mapped = false)
  87 {
  88   if (base == requested_address || requested_address == NULL)
  89     return false; // did not fail
  90 
  91   if (base != NULL) {
  92     // A different reserve address may be acceptable in other cases
  93     // but for compressed oops the heap should be at the requested address.
  94     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  95     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  96     // OS ignored requested address. Try different address.
  97     if (special) {
  98       if (!os::release_memory_special(base, size)) {
  99         fatal("os::release_memory_special failed");
 100       }
 101     } else {
 102       unmap_or_release_memory(base, size, is_file_mapped);
 103     }
 104   }
 105   return true;
 106 }
 107 
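     // Reserve 'size' bytes with the given alignment, optionally at
     // 'requested_address' and optionally backed by large pages.  On failure,
     // _base stays NULL and the space is left unreserved.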
 108 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 109                                char* requested_address,
 110                                bool executable) {
 111   const size_t granularity = os::vm_allocation_granularity();
 112   assert((size & (granularity - 1)) == 0,
 113          "size not aligned to os::vm_allocation_granularity()");
 114   assert((alignment & (granularity - 1)) == 0,
 115          "alignment not aligned to os::vm_allocation_granularity()");
 116   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 117          "not a power of 2");
 118 
 119   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 120 
 121   _base = NULL;
 122   _size = 0;
 123   _special = false;
 124   _executable = executable;
 125   _alignment = 0;
 126   _noaccess_prefix = 0;
 127   if (size == 0) {
 128     return;
 129   }
 130 
 131   // If the OS doesn't support demand paging for large page memory, we need
 132   // to use reserve_memory_special() to reserve and pin the entire region.
 133   // If there is a backing file directory for this space then whether
 134   // large pages are allocated is up to the filesystem of the backing file.
 135   // So we ignore the UseLargePages flag in this case.
 136   bool special = large && !os::can_commit_large_page_memory();
 137   if (special && _fd_for_heap != -1) {
 138     special = false;
 139     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 140       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 141       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 142     }
 143   }
 144 
 145   char* base = NULL;
 146 
 147   if (special) {
 148 
 149     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 150 
 151     if (base != NULL) {
 152       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 153         // OS ignored requested address. Try different address.
 154         return;
 155       }
 156       // Check alignment constraints.
 157       assert((uintptr_t) base % alignment == 0,
 158              "Large pages returned a non-aligned address, base: "
 159              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 160              p2i(base), alignment);
 161       _special = true;
 162     } else {
 163       // failed; try to reserve regular memory below
 164       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 165                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 166         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 167       }
 168     }
 169   }
 170 
 171   if (base == NULL) {
 172     // Optimistically assume that the OS returns an aligned base pointer.
 173     // When reserving a large address range, most OSes seem to align to at
 174     // least 64K.
 175 
 176     // If the memory was requested at a particular address, use
 177     // os::attempt_reserve_memory_at() to avoid mapping over something
 178     // important.  If available space is not detected, return NULL.
 179 
 180     if (requested_address != 0) {
 181       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 182       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 183         // OS ignored requested address. Try different address.
 184         base = NULL;
 185       }
 186     } else {
 187       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 188     }
 189 
 190     if (base == NULL) return;
 191 
 192     // Check alignment constraints
 193     if ((((size_t)base) & (alignment - 1)) != 0) {
 194       // Base not aligned, retry
 195       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 196 
 197       // Make sure that size is aligned
 198       size = align_up(size, alignment);
 199       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 200 
 201       if (requested_address != 0 &&
 202           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 203         // As a result of the alignment constraints, the allocated base differs
 204         // from the requested address. Return to the caller, who can
 205         // take remedial action (like trying again without a requested address).
 206         assert(_base == NULL, "should be");
 207         return;
 208       }
 209     }
 210   }
 211   // Done
 212   _base = base;
 213   _size = size;
 214   _alignment = alignment;
 215   // If the heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true.
 216   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 217     _special = true;
 218   }
 219 }
 220 
 221 
 222 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 223                              bool special, bool executable) {
 224   assert((size % os::vm_allocation_granularity()) == 0,
 225          "size not allocation aligned");
 226   _base = base;
 227   _size = size;
 228   _alignment = alignment;
 229   _noaccess_prefix = 0;
 230   _special = special;
 231   _executable = executable;
 232 }
 233 
 234 
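     // Carve off the first 'partition_size' bytes of this reservation and return
     // them as a new ReservedSpace.  If 'split' is set, the underlying OS mapping
     // is split at the partition boundary as well (see os::split_reserved_memory).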
 235 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 236                                         bool split, bool realloc) {
 237   assert(partition_size <= size(), "partition failed");
 238   if (split) {
 239     os::split_reserved_memory(base(), size(), partition_size, realloc);
 240   }
 241   ReservedSpace result(base(), partition_size, alignment, special(),
 242                        executable());
 243   return result;
 244 }
 245 
 246 
 247 ReservedSpace
 248 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 249   assert(partition_size <= size(), "partition failed");
 250   ReservedSpace result(base() + partition_size, size() - partition_size,
 251                        alignment, special(), executable());
 252   return result;
 253 }
 254 
 255 
 256 size_t ReservedSpace::page_align_size_up(size_t size) {
 257   return align_up(size, os::vm_page_size());
 258 }
 259 
 260 
 261 size_t ReservedSpace::page_align_size_down(size_t size) {
 262   return align_down(size, os::vm_page_size());
 263 }
 264 
 265 
 266 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 267   return align_up(size, os::vm_allocation_granularity());
 268 }
 269 
 270 
 271 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 272   return align_down(size, os::vm_allocation_granularity());
 273 }
 274 
 275 
 276 void ReservedSpace::release() {
 277   if (is_reserved()) {
 278     char *real_base = _base - _noaccess_prefix;
 279     const size_t real_size = _size + _noaccess_prefix;
 280     if (special()) {
 281       if (_fd_for_heap != -1) {
 282         os::unmap_memory(real_base, real_size);
 283       } else {
 284         os::release_memory_special(real_base, real_size);
 285       }
 286     } else {
 287       os::release_memory(real_base, real_size);
 288     }
 289     _base = NULL;
 290     _size = 0;
 291     _noaccess_prefix = 0;
 292     _alignment = 0;
 293     _special = false;
 294     _executable = false;
 295   }
 296 }
 297 
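     // The noaccess prefix has to span whole pages, so it can be protected, and it
     // must not disturb the heap alignment, hence the least common multiple of the
     // page size and the alignment.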
 298 static size_t noaccess_prefix_size(size_t alignment) {
 299   return lcm(os::vm_page_size(), alignment);
 300 }
 301 
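     // Protect the first _noaccess_prefix bytes of the reservation and exclude them
     // from the usable heap.  With a non-zero compressed oops base, decoding a null
     // narrow oop yields an address inside this protected prefix, so the access
     // traps and can serve as an implicit null check.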
 302 void ReservedHeapSpace::establish_noaccess_prefix() {
 303   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 310       // Protect memory at the base of the allocated region.
 311       // If special, the page was committed (only matters on Windows).
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       Universe::set_narrow_oop_use_implicit_null_checks(false);
 322     }
 323   }
 324   _base += _noaccess_prefix;
 325   _size -= _noaccess_prefix;
 326   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 327 }
 328 
 329 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 330 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 331 // might still fulfill the wishes of the caller.
 332 // Assures the memory is aligned to 'alignment'.
 333 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 334 void ReservedHeapSpace::try_reserve_heap(size_t size,
 335                                          size_t alignment,
 336                                          bool large,
 337                                          char* requested_address) {
 338   if (_base != NULL) {
 339     // We tried before, but we didn't like the address delivered.
 340     release();
 341   }
 342 
 343   // If the OS doesn't support demand paging for large page memory, we need
 344   // to use reserve_memory_special() to reserve and pin the entire region.
 345   // If there is a backing file directory for this space then whether
 346   // large pages are allocated is up to the filesystem of the backing file.
 347   // So we ignore the UseLargePages flag in this case.
 348   bool special = large && !os::can_commit_large_page_memory();
 349   if (special && _fd_for_heap != -1) {
 350     special = false;
 351     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 352                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 353       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 354     }
 355   }
 356   char* base = NULL;
 357 
 358   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 359                              " heap of size " SIZE_FORMAT_HEX,
 360                              p2i(requested_address),
 361                              size);
 362 
 363   if (special) {
 364     base = os::reserve_memory_special(size, alignment, requested_address, false);
 365 
 366     if (base != NULL) {
 367       // Check alignment constraints.
 368       assert((uintptr_t) base % alignment == 0,
 369              "Large pages returned a non-aligned address, base: "
 370              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 371              p2i(base), alignment);
 372       _special = true;
 373     }
 374   }
 375 
 376   if (base == NULL) {
 377     // Failed; try to reserve regular memory below
 378     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 379                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 380       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 381     }
 382 
 383     // Optimistically assume that the OS returns an aligned base pointer.
 384     // When reserving a large address range, most OSes seem to align to at
 385     // least 64K.
 386 
 387     // If the memory was requested at a particular address, use
 388     // os::attempt_reserve_memory_at() to avoid mapping over something
 389     // important.  If available space is not detected, return NULL.
 390 
 391     if (requested_address != 0) {
 392       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 393     } else {
 394       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 395     }
 396   }
 397   if (base == NULL) { return; }
 398 
 399   // Done
 400   _base = base;
 401   _size = size;
 402   _alignment = alignment;
 403 
 404   // If the heap is reserved with a backing file, the entire space has been committed. So set the _special flag to true.
 405   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 406     _special = true;
 407   }
 408 
 409   // Check alignment constraints
 410   if ((((size_t)base) & (alignment - 1)) != 0) {
 411     // Base not aligned, retry.
 412     release();
 413   }
 414 }
 415 
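     // Walk a series of attach points between 'lowest_start' and 'highest_start'
     // from the top down, trying to reserve the heap at each one, until a
     // reservation satisfies base >= aligned_heap_base_min_address and
     // base + size <= upper_bound, or the attach points are exhausted.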
 416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 417                                           char *lowest_start,
 418                                           size_t attach_point_alignment,
 419                                           char *aligned_heap_base_min_address,
 420                                           char *upper_bound,
 421                                           size_t size,
 422                                           size_t alignment,
 423                                           bool large) {
 424   const size_t attach_range = highest_start - lowest_start;
 425   // Cap num_attempts at the number of possible attach points.
 426   // At least one attempt is possible even for a zero-sized attach range.
 427   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 428   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 429 
 430   const size_t stepsize = (attach_range == 0) ? // Only one try.
 431     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 432 
 433   // Try attach points from top to bottom.
 434   char* attach_point = highest_start;
 435   while (attach_point >= lowest_start  &&
 436          attach_point <= highest_start &&  // Avoid wrap around.
 437          ((_base == NULL) ||
 438           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 439     try_reserve_heap(size, alignment, large, attach_point);
 440     attach_point -= stepsize;
 441   }
 442 }
 443 
 444 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 445 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 446 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 447 
 448 // Helper for heap allocation. Returns an array with addresses
 449 // (OS-specific) which are suited for disjoint base mode. Array is
 450 // NULL terminated.
 451 static char** get_attach_addresses_for_disjoint_mode() {
 452   static uint64_t addresses[] = {
 453      2 * SIZE_32G,
 454      3 * SIZE_32G,
 455      4 * SIZE_32G,
 456      8 * SIZE_32G,
 457     10 * SIZE_32G,
 458      1 * SIZE_64K * SIZE_32G,
 459      2 * SIZE_64K * SIZE_32G,
 460      3 * SIZE_64K * SIZE_32G,
 461      4 * SIZE_64K * SIZE_32G,
 462     16 * SIZE_64K * SIZE_32G,
 463     32 * SIZE_64K * SIZE_32G,
 464     34 * SIZE_64K * SIZE_32G,
 465     0
 466   };
 467 
 468   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 469   // assumes the array is sorted in ascending order.
 470   uint i = 0;
 471   while (addresses[i] != 0 &&
 472          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 473     i++;
 474   }
 475   uint start = i;
 476 
 477   // Avoid more steps than requested.
 478   i = 0;
 479   while (addresses[start+i] != 0) {
 480     if (i == HeapSearchSteps) {
 481       addresses[start+i] = 0;
 482       break;
 483     }
 484     i++;
 485   }
 486 
 487   return (char**) &addresses[start];
 488 }
 489 
 490 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 491   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 492             "cannot allocate compressed oop heap for this size");
 493   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 494   assert(HeapBaseMinAddress > 0, "sanity");
 495 
 496   const size_t granularity = os::vm_allocation_granularity();
 497   assert((size & (granularity - 1)) == 0,
 498          "size not aligned to os::vm_allocation_granularity()");
 499   assert((alignment & (granularity - 1)) == 0,
 500          "alignment not aligned to os::vm_allocation_granularity()");
 501   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 502          "not a power of 2");
 503 
 504   // The necessary attach point alignment for generated wish addresses.
 505   // This is needed to increase the chance of attaching for mmap and shmat.
 506   const size_t os_attach_point_alignment =
 507     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 508     NOT_AIX(os::vm_allocation_granularity());
 509   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 510 
 511   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 512   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 513     noaccess_prefix_size(alignment) : 0;
 514 
 515   // Attempt to alloc at user-given address.
 516   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 517     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 518     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 519       release();
 520     }
 521   }
 522 
 523   // Keep heap at HeapBaseMinAddress.
 524   if (_base == NULL) {
 525 
 526     // Try to allocate the heap at addresses that allow efficient oop compression.
 527     // Different schemes are tried, in order of decreasing optimization potential.
 528     //
 529     // For this, try_reserve_heap() is called with the desired heap base addresses.
 530     // A call into the os layer to allocate at a given address can return memory
 531     // at a different address than requested.  Still, this might be memory at a useful
 532     // address. try_reserve_heap() always returns this allocated memory, since the
 533     // criteria for a good heap are only checked here.
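         //
         // The schemes, in order: unscaled (the heap ends below UnscaledOopHeapMax,
         // so a narrow oop is simply the address), zero-based (the heap ends below
         // OopEncodingHeapMax, so decoding needs only a shift), disjoint base (the
         // base bits do not overlap the shifted oop bits), and finally an arbitrary
         // address that requires both base and shift.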
 534 
 535     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 536     // Give it several tries from top of range to bottom.
 537     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 538 
 539       // Calc the address range within which we try to attach (range of possible start addresses).
 540       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 541       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 542       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 543                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 544     }
 545 
 546     // zerobased: Attempt to allocate in the lower 32G.
 547     // But leave room for the compressed class pointer space, which is allocated
 548     // above the heap.
 549     char *zerobased_max = (char *)OopEncodingHeapMax;
 550     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 551     // For small heaps, save some space for compressed class pointer
 552     // space so it can be decoded with no base.
 553     if (UseCompressedClassPointers && !UseSharedSpaces &&
 554         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 555         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 556       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 557     }
 558 
 559     // Give it several tries from top of range to bottom.
 560     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 561         ((_base == NULL) ||                        // No previous try succeeded.
 562          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 563 
 564       // Calc the address range within which we try to attach (range of possible start addresses).
 565       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 566       // Need to be careful about size being guaranteed to be less
 567       // than UnscaledOopHeapMax due to type constraints.
 568       char *lowest_start = aligned_heap_base_min_address;
 569       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 570       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 571         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 572       }
 573       lowest_start = align_up(lowest_start, attach_point_alignment);
 574       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 575                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 576     }
 577 
 578     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 579     // implement null checks.
 580     noaccess_prefix = noaccess_prefix_size(alignment);
 581 
 582     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 583     char** addresses = get_attach_addresses_for_disjoint_mode();
 584     int i = 0;
 585     while (addresses[i] &&                                 // End of array not yet reached.
 586            ((_base == NULL) ||                             // No previous try succeeded.
 587             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 588              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 589       char* const attach_point = addresses[i];
 590       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 591       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 592       i++;
 593     }
 594 
 595     // Last, desperate try without any placement.
 596     if (_base == NULL) {
 597       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 598       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 599     }
 600   }
 601 }
 602 
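     // Reserve space for the Java heap.  With compressed oops the placement is
     // chosen so that oops can be decoded as cheaply as possible; otherwise this is
     // a plain reservation.  The heap may be backed by a file when AllocateOldGenAt
     // is set or a heap allocation directory is passed in.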
 603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 604 
 605   if (size == 0) {
 606     return;
 607   }
 608 
 609   // Open AllocateOldGenAt file
 610   if (AllocateOldGenAt != NULL) {
 611     _fd_for_heap = os::create_file_for_heap(AllocateOldGenAt);
 612     if (_fd_for_heap == -1) {
 613       vm_exit_during_initialization(
 614         err_msg("Could not create file for Heap at location %s", AllocateOldGenAt));
 615     }
 616     // Allocate space on device.
 617     os::allocate_file(_fd_for_heap, MaxHeapSize);
 618     os::set_nvdimm_fd(_fd_for_heap);
 619     os::set_nvdimm_present(true);
 620   } else {
 621     _fd_for_heap = -1;
 622   }
 623 
 624   if (heap_allocation_directory != NULL) {
 625     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 626     if (_fd_for_heap == -1) {
 627       vm_exit_during_initialization(
 628         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 629     }
 630   }
 631 
 632   // Heap size should be aligned to alignment, too.
 633   guarantee(is_aligned(size, alignment), "set by caller");
 634 
 635   if (UseCompressedOops) {
 636     initialize_compressed_heap(size, alignment, large);
 637     if (_size > size) {
 638       // We allocated the heap with a noaccess prefix.
 639       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
 640       // if we had to try at an arbitrary address.
 641       establish_noaccess_prefix();
 642     }
 643   } else {
 644     initialize(size, alignment, large, NULL, false);
 645   }
 646 
 647   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 648          "area must be distinguishable from marks for mark-sweep");
 649   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 650          "area must be distinguishable from marks for mark-sweep");
 651 
 652   if (base() != NULL) {
 653     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 654     if (AllocateOldGenAt != NULL && _fd_for_heap != -1) {
 655       os::set_nvdimm_heapbase((address)_base);
 656     }
 657   }
 658 
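       // For a heap backed by a file from a heap allocation directory, the
       // descriptor is only needed while reserving; with AllocateOldGenAt it is
       // kept open (see os::set_nvdimm_fd above).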
 659   if (_fd_for_heap != -1 && AllocateOldGenAt == NULL) {
 660     os::close(_fd_for_heap);
 661   }
 662 }
 663 
 664 // Reserve space for the code segment.  Same as the Java heap, only we mark this
 665 // as executable.
 666 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 667                                      size_t rs_align,
 668                                      bool large) :
 669   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 670   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 671 }
 672 
 673 // VirtualSpace
 674 
 675 VirtualSpace::VirtualSpace() {
 676   _low_boundary           = NULL;
 677   _high_boundary          = NULL;
 678   _low                    = NULL;
 679   _high                   = NULL;
 680   _lower_high             = NULL;
 681   _middle_high            = NULL;
 682   _upper_high             = NULL;
 683   _lower_high_boundary    = NULL;
 684   _middle_high_boundary   = NULL;
 685   _upper_high_boundary    = NULL;
 686   _lower_alignment        = 0;
 687   _middle_alignment       = 0;
 688   _upper_alignment        = 0;
 689   _special                = false;
 690   _executable             = false;
 691 }
 692 
 693 
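     // Typical usage (a sketch, mirroring the unit tests at the end of this file):
     //   ReservedSpace rs(bytes);
     //   VirtualSpace vs;
     //   vs.initialize(rs, 0 /* committed_size */);
     //   vs.expand_by(bytes_to_commit, false);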
 694 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 695   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 696   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 697 }
 698 
 699 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 700   if (!rs.is_reserved()) return false;  // allocation failed.
 701   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 702   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 703 
 704   _low_boundary  = rs.base();
 705   _high_boundary = low_boundary() + rs.size();
 706 
 707   _low = low_boundary();
 708   _high = low();
 709 
 710   _special = rs.special();
 711   _executable = rs.executable();
 712 
 713   // When a VirtualSpace begins life at a large size, make all future expansion
 714   // and shrinking occur aligned to a granularity of large pages.  This avoids
 715   // fragmentation of physical addresses that inhibits the use of large pages
 716   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 717   // page size, the only spaces that get handled this way are codecache and
 718   // the heap itself, both of which provide a substantial performance
 719   // boost in many benchmarks when covered by large pages.
 720   //
 721   // No attempt is made to force large page alignment at the very top and
 722   // bottom of the space if they are not aligned so already.
 723   _lower_alignment  = os::vm_page_size();
 724   _middle_alignment = max_commit_granularity;
 725   _upper_alignment  = os::vm_page_size();
 726 
 727   // End of each region
 728   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 729   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 730   _upper_high_boundary = high_boundary();
 731 
 732   // High address of each region
 733   _lower_high = low_boundary();
 734   _middle_high = lower_high_boundary();
 735   _upper_high = middle_high_boundary();
 736 
 737   // commit to initial size
 738   if (committed_size > 0) {
 739     if (!expand_by(committed_size)) {
 740       return false;
 741     }
 742   }
 743   return true;
 744 }
 745 
 746 
 747 VirtualSpace::~VirtualSpace() {
 748   release();
 749 }
 750 
 751 
 752 void VirtualSpace::release() {
 753   // This does not release the underlying reserved memory.
 754   // The caller must release that via rs.release().
 755   _low_boundary           = NULL;
 756   _high_boundary          = NULL;
 757   _low                    = NULL;
 758   _high                   = NULL;
 759   _lower_high             = NULL;
 760   _middle_high            = NULL;
 761   _upper_high             = NULL;
 762   _lower_high_boundary    = NULL;
 763   _middle_high_boundary   = NULL;
 764   _upper_high_boundary    = NULL;
 765   _lower_alignment        = 0;
 766   _middle_alignment       = 0;
 767   _upper_alignment        = 0;
 768   _special                = false;
 769   _executable             = false;
 770 }
 771 
 772 
 773 size_t VirtualSpace::committed_size() const {
 774   return pointer_delta(high(), low(), sizeof(char));
 775 }
 776 
 777 
 778 size_t VirtualSpace::reserved_size() const {
 779   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 780 }
 781 
 782 
 783 size_t VirtualSpace::uncommitted_size()  const {
 784   return reserved_size() - committed_size();
 785 }
 786 
 787 size_t VirtualSpace::actual_committed_size() const {
 788   // Special VirtualSpaces commit all reserved space up front.
 789   if (special()) {
 790     return reserved_size();
 791   }
 792 
 793   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 794   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 795   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 796 
 797 #ifdef ASSERT
 798   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 799   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 800   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 801 
 802   if (committed_high > 0) {
 803     assert(committed_low == lower, "Must be");
 804     assert(committed_middle == middle, "Must be");
 805   }
 806 
 807   if (committed_middle > 0) {
 808     assert(committed_low == lower, "Must be");
 809   }
 810   if (committed_middle < middle) {
 811     assert(committed_high == 0, "Must be");
 812   }
 813 
 814   if (committed_low < lower) {
 815     assert(committed_high == 0, "Must be");
 816     assert(committed_middle == 0, "Must be");
 817   }
 818 #endif
 819 
 820   return committed_low + committed_middle + committed_high;
 821 }
 822 
 823 
 824 bool VirtualSpace::contains(const void* p) const {
 825   return low() <= (const char*) p && (const char*) p < high();
 826 }
 827 
 828 static void pretouch_expanded_memory(void* start, void* end) {
 829   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 830   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 831 
 832   os::pretouch_memory(start, end);
 833 }
 834 
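     // Commit the given range and optionally pre-touch it.  Returns false (with a
     // warning in debug builds) if the commit fails.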
 835 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 836   if (os::commit_memory(start, size, alignment, executable)) {
 837     if (pre_touch || AlwaysPreTouch) {
 838       pretouch_expanded_memory(start, start + size);
 839     }
 840     return true;
 841   }
 842 
 843   debug_only(warning(
 844       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 845       " size=" SIZE_FORMAT ", executable=%d) failed",
 846       p2i(start), p2i(start + size), size, executable);)
 847 
 848   return false;
 849 }
 850 
 851 /*
 852    First we need to determine if a particular virtual space is using large
 853    pages.  This is done in the initialize function and only virtual spaces
 854    that are larger than LargePageSizeInBytes use large pages.  Once we
 855    have determined this, all expand_by and shrink_by calls must grow and
 856    shrink by large page size chunks.  If a particular request
 857    is within the current large page, the call to commit and uncommit memory
 858    can be ignored.  In the case that the low and high boundaries of this
 859    space are not large page aligned, the pages leading to the first large
 860    page address and the pages after the last large page address must be
 861    allocated with default pages.
 862 */
 863 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 864   if (uncommitted_size() < bytes) {
 865     return false;
 866   }
 867 
 868   if (special()) {
 869     // don't commit memory if the entire space is pinned in memory
 870     _high += bytes;
 871     return true;
 872   }
 873 
 874   char* previous_high = high();
 875   char* unaligned_new_high = high() + bytes;
 876   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 877 
 878   // Calculate where the new high for each of the regions should be.  If
 879   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 880   // then the unaligned lower and upper new highs would be the
 881   // lower_high() and upper_high() respectively.
 882   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 883   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 884   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 885 
 886   // Align the new highs based on each region's alignment.  Lower and upper
 887   // alignment will always be the default page size.  Middle alignment will be
 888   // LargePageSizeInBytes if the actual size of the virtual space is in
 889   // fact larger than LargePageSizeInBytes.
 890   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 891   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 892   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 893 
 894   // Determine which regions need to grow in this expand_by call.
 895   // If you are growing in the lower region, high() must be in that
 896   // region so calculate the size based on high().  For the middle and
 897   // upper regions, determine the starting point of growth based on the
 898   // location of high().  By getting the MAX of the region's low address
 899   // (or the previous region's high address) and high(), we can tell if it
 900   // is an intra or inter region growth.
 901   size_t lower_needs = 0;
 902   if (aligned_lower_new_high > lower_high()) {
 903     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 904   }
 905   size_t middle_needs = 0;
 906   if (aligned_middle_new_high > middle_high()) {
 907     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 908   }
 909   size_t upper_needs = 0;
 910   if (aligned_upper_new_high > upper_high()) {
 911     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 912   }
 913 
 914   // Check contiguity.
 915   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 916          "high address must be contained within the region");
 917   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 918          "high address must be contained within the region");
 919   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 920          "high address must be contained within the region");
 921 
 922   // Commit regions
 923   if (lower_needs > 0) {
 924     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 925     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 926       return false;
 927     }
 928     _lower_high += lower_needs;
 929   }
 930 
 931   if (middle_needs > 0) {
 932     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 933     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 934       return false;
 935     }
 936     _middle_high += middle_needs;
 937   }
 938 
 939   if (upper_needs > 0) {
 940     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 941     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 942       return false;
 943     }
 944     _upper_high += upper_needs;
 945   }
 946 
 947   _high += bytes;
 948   return true;
 949 }
 950 
 951 // A page is uncommitted if the contents of the entire page are deemed unusable.
 952 // Continue to decrement the high() pointer until it reaches a page boundary,
 953 // in which case that particular page can now be uncommitted.
 954 void VirtualSpace::shrink_by(size_t size) {
 955   if (committed_size() < size)
 956     fatal("Cannot shrink virtual space to negative size");
 957 
 958   if (special()) {
 959     // don't uncommit if the entire space is pinned in memory
 960     _high -= size;
 961     return;
 962   }
 963 
 964   char* unaligned_new_high = high() - size;
 965   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 966 
 967   // Calculate new unaligned address
 968   char* unaligned_upper_new_high =
 969     MAX2(unaligned_new_high, middle_high_boundary());
 970   char* unaligned_middle_new_high =
 971     MAX2(unaligned_new_high, lower_high_boundary());
 972   char* unaligned_lower_new_high =
 973     MAX2(unaligned_new_high, low_boundary());
 974 
 975   // Align address to region's alignment
 976   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 977   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 978   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 979 
 980   // Determine which regions need to shrink
 981   size_t upper_needs = 0;
 982   if (aligned_upper_new_high < upper_high()) {
 983     upper_needs =
 984       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 985   }
 986   size_t middle_needs = 0;
 987   if (aligned_middle_new_high < middle_high()) {
 988     middle_needs =
 989       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 990   }
 991   size_t lower_needs = 0;
 992   if (aligned_lower_new_high < lower_high()) {
 993     lower_needs =
 994       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 995   }
 996 
 997   // Check contiguity.
 998   assert(middle_high_boundary() <= upper_high() &&
 999          upper_high() <= upper_high_boundary(),
1000          "high address must be contained within the region");
1001   assert(lower_high_boundary() <= middle_high() &&
1002          middle_high() <= middle_high_boundary(),
1003          "high address must be contained within the region");
1004   assert(low_boundary() <= lower_high() &&
1005          lower_high() <= lower_high_boundary(),
1006          "high address must be contained within the region");
1007 
1008   // Uncommit
1009   if (upper_needs > 0) {
1010     assert(middle_high_boundary() <= aligned_upper_new_high &&
1011            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
1012            "must not shrink beyond region");
1013     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
1014       debug_only(warning("os::uncommit_memory failed"));
1015       return;
1016     } else {
1017       _upper_high -= upper_needs;
1018     }
1019   }
1020   if (middle_needs > 0) {
1021     assert(lower_high_boundary() <= aligned_middle_new_high &&
1022            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1023            "must not shrink beyond region");
1024     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1025       debug_only(warning("os::uncommit_memory failed"));
1026       return;
1027     } else {
1028       _middle_high -= middle_needs;
1029     }
1030   }
1031   if (lower_needs > 0) {
1032     assert(low_boundary() <= aligned_lower_new_high &&
1033            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1034            "must not shrink beyond region");
1035     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1036       debug_only(warning("os::uncommit_memory failed"));
1037       return;
1038     } else {
1039       _lower_high -= lower_needs;
1040     }
1041   }
1042 
1043   _high -= size;
1044 }
1045 
1046 #ifndef PRODUCT
1047 void VirtualSpace::check_for_contiguity() {
1048   // Check contiguity.
1049   assert(low_boundary() <= lower_high() &&
1050          lower_high() <= lower_high_boundary(),
1051          "high address must be contained within the region");
1052   assert(lower_high_boundary() <= middle_high() &&
1053          middle_high() <= middle_high_boundary(),
1054          "high address must be contained within the region");
1055   assert(middle_high_boundary() <= upper_high() &&
1056          upper_high() <= upper_high_boundary(),
1057          "high address must be contained within the region");
1058   assert(low() >= low_boundary(), "low");
1059   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1060   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1061   assert(high() <= upper_high(), "upper high");
1062 }
1063 
1064 void VirtualSpace::print_on(outputStream* out) {
1065   out->print   ("Virtual space:");
1066   if (special()) out->print(" (pinned in memory)");
1067   out->cr();
1068   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1069   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1070   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1071   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1072 }
1073 
1074 void VirtualSpace::print() {
1075   print_on(tty);
1076 }
1077 
1078 /////////////// Unit tests ///////////////
1079 
1080 #ifndef PRODUCT
1081 
1082 #define test_log(...) \
1083   do {\
1084     if (VerboseInternalVMTests) { \
1085       tty->print_cr(__VA_ARGS__); \
1086       tty->flush(); \
1087     }\
1088   } while (false)
1089 
1090 class TestReservedSpace : AllStatic {
1091  public:
1092   static void small_page_write(void* addr, size_t size) {
1093     size_t page_size = os::vm_page_size();
1094 
1095     char* end = (char*)addr + size;
1096     for (char* p = (char*)addr; p < end; p += page_size) {
1097       *p = 1;
1098     }
1099   }
1100 
1101   static void release_memory_for_test(ReservedSpace rs) {
1102     if (rs.special()) {
1103       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1104     } else {
1105       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1106     }
1107   }
1108 
1109   static void test_reserved_space1(size_t size, size_t alignment) {
1110     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1111 
1112     assert(is_aligned(size, alignment), "Incorrect input parameters");
1113 
1114     ReservedSpace rs(size,          // size
1115                      alignment,     // alignment
1116                      UseLargePages, // large
1117                      (char *)NULL); // requested_address
1118 
1119     test_log(" rs.special() == %d", rs.special());
1120 
1121     assert(rs.base() != NULL, "Must be");
1122     assert(rs.size() == size, "Must be");
1123 
1124     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1125     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1126 
1127     if (rs.special()) {
1128       small_page_write(rs.base(), size);
1129     }
1130 
1131     release_memory_for_test(rs);
1132   }
1133 
1134   static void test_reserved_space2(size_t size) {
1135     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1136 
1137     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1138 
1139     ReservedSpace rs(size);
1140 
1141     test_log(" rs.special() == %d", rs.special());
1142 
1143     assert(rs.base() != NULL, "Must be");
1144     assert(rs.size() == size, "Must be");
1145 
1146     if (rs.special()) {
1147       small_page_write(rs.base(), size);
1148     }
1149 
1150     release_memory_for_test(rs);
1151   }
1152 
1153   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1154     test_log("test_reserved_space3(%p, %p, %d)",
1155         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1156 
1157     if (size < alignment) {
1158       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1159       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1160       return;
1161     }
1162 
1163     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1164     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1165 
1166     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1167 
1168     ReservedSpace rs(size, alignment, large, false);
1169 
1170     test_log(" rs.special() == %d", rs.special());
1171 
1172     assert(rs.base() != NULL, "Must be");
1173     assert(rs.size() == size, "Must be");
1174 
1175     if (rs.special()) {
1176       small_page_write(rs.base(), size);
1177     }
1178 
1179     release_memory_for_test(rs);
1180   }
1181 
1182 
1183   static void test_reserved_space1() {
1184     size_t size = 2 * 1024 * 1024;
1185     size_t ag   = os::vm_allocation_granularity();
1186 
1187     test_reserved_space1(size,      ag);
1188     test_reserved_space1(size * 2,  ag);
1189     test_reserved_space1(size * 10, ag);
1190   }
1191 
1192   static void test_reserved_space2() {
1193     size_t size = 2 * 1024 * 1024;
1194     size_t ag = os::vm_allocation_granularity();
1195 
1196     test_reserved_space2(size * 1);
1197     test_reserved_space2(size * 2);
1198     test_reserved_space2(size * 10);
1199     test_reserved_space2(ag);
1200     test_reserved_space2(size - ag);
1201     test_reserved_space2(size);
1202     test_reserved_space2(size + ag);
1203     test_reserved_space2(size * 2);
1204     test_reserved_space2(size * 2 - ag);
1205     test_reserved_space2(size * 2 + ag);
1206     test_reserved_space2(size * 3);
1207     test_reserved_space2(size * 3 - ag);
1208     test_reserved_space2(size * 3 + ag);
1209     test_reserved_space2(size * 10);
1210     test_reserved_space2(size * 10 + size / 2);
1211   }
1212 
1213   static void test_reserved_space3() {
1214     size_t ag = os::vm_allocation_granularity();
1215 
1216     test_reserved_space3(ag,      ag    , false);
1217     test_reserved_space3(ag * 2,  ag    , false);
1218     test_reserved_space3(ag * 3,  ag    , false);
1219     test_reserved_space3(ag * 2,  ag * 2, false);
1220     test_reserved_space3(ag * 4,  ag * 2, false);
1221     test_reserved_space3(ag * 8,  ag * 2, false);
1222     test_reserved_space3(ag * 4,  ag * 4, false);
1223     test_reserved_space3(ag * 8,  ag * 4, false);
1224     test_reserved_space3(ag * 16, ag * 4, false);
1225 
1226     if (UseLargePages) {
1227       size_t lp = os::large_page_size();
1228 
1229       // Without large pages
1230       test_reserved_space3(lp,     ag * 4, false);
1231       test_reserved_space3(lp * 2, ag * 4, false);
1232       test_reserved_space3(lp * 4, ag * 4, false);
1233       test_reserved_space3(lp,     lp    , false);
1234       test_reserved_space3(lp * 2, lp    , false);
1235       test_reserved_space3(lp * 3, lp    , false);
1236       test_reserved_space3(lp * 2, lp * 2, false);
1237       test_reserved_space3(lp * 4, lp * 2, false);
1238       test_reserved_space3(lp * 8, lp * 2, false);
1239 
1240       // With large pages
1241       test_reserved_space3(lp, ag * 4    , true);
1242       test_reserved_space3(lp * 2, ag * 4, true);
1243       test_reserved_space3(lp * 4, ag * 4, true);
1244       test_reserved_space3(lp, lp        , true);
1245       test_reserved_space3(lp * 2, lp    , true);
1246       test_reserved_space3(lp * 3, lp    , true);
1247       test_reserved_space3(lp * 2, lp * 2, true);
1248       test_reserved_space3(lp * 4, lp * 2, true);
1249       test_reserved_space3(lp * 8, lp * 2, true);
1250     }
1251   }
1252 
1253   static void test_reserved_space() {
1254     test_reserved_space1();
1255     test_reserved_space2();
1256     test_reserved_space3();
1257   }
1258 };
1259 
1260 void TestReservedSpace_test() {
1261   TestReservedSpace::test_reserved_space();
1262 }
1263 
1264 #define assert_equals(actual, expected)  \
1265   assert(actual == expected,             \
1266          "Got " SIZE_FORMAT " expected " \
1267          SIZE_FORMAT, actual, expected);
1268 
1269 #define assert_ge(value1, value2)                  \
1270   assert(value1 >= value2,                         \
1271          "'" #value1 "': " SIZE_FORMAT " '"        \
1272          #value2 "': " SIZE_FORMAT, value1, value2);
1273 
1274 #define assert_lt(value1, value2)                  \
1275   assert(value1 < value2,                          \
1276          "'" #value1 "': " SIZE_FORMAT " '"        \
1277          #value2 "': " SIZE_FORMAT, value1, value2);
1278 
1279 
1280 class TestVirtualSpace : AllStatic {
1281   enum TestLargePages {
1282     Default,
1283     Disable,
1284     Reserve,
1285     Commit
1286   };
1287 
1288   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1289     switch(mode) {
1290     default:
1291     case Default:
1292     case Reserve:
1293       return ReservedSpace(reserve_size_aligned);
1294     case Disable:
1295     case Commit:
1296       return ReservedSpace(reserve_size_aligned,
1297                            os::vm_allocation_granularity(),
1298                            /* large */ false, /* exec */ false);
1299     }
1300   }
1301 
1302   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1303     switch(mode) {
1304     default:
1305     case Default:
1306     case Reserve:
1307       return vs.initialize(rs, 0);
1308     case Disable:
1309       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1310     case Commit:
1311       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1312     }
1313   }
1314 
1315  public:
1316   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1317                                                         TestLargePages mode = Default) {
1318     size_t granularity = os::vm_allocation_granularity();
1319     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1320 
1321     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1322 
1323     assert(reserved.is_reserved(), "Must be");
1324 
1325     VirtualSpace vs;
1326     bool initialized = initialize_virtual_space(vs, reserved, mode);
1327     assert(initialized, "Failed to initialize VirtualSpace");
1328 
1329     vs.expand_by(commit_size, false);
1330 
1331     if (vs.special()) {
1332       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1333     } else {
1334       assert_ge(vs.actual_committed_size(), commit_size);
1335       // Approximate the commit granularity.
1336       // Make sure that we don't commit using large pages
1337       // if large pages have been disabled for this VirtualSpace.
1338       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1339                                    os::vm_page_size() : os::large_page_size();
1340       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1341     }
1342 
1343     reserved.release();
1344   }
1345 
1346   static void test_virtual_space_actual_committed_space_one_large_page() {
1347     if (!UseLargePages) {
1348       return;
1349     }
1350 
1351     size_t large_page_size = os::large_page_size();
1352 
1353     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1354 
1355     assert(reserved.is_reserved(), "Must be");
1356 
1357     VirtualSpace vs;
1358     bool initialized = vs.initialize(reserved, 0);
1359     assert(initialized, "Failed to initialize VirtualSpace");
1360 
1361     vs.expand_by(large_page_size, false);
1362 
1363     assert_equals(vs.actual_committed_size(), large_page_size);
1364 
1365     reserved.release();
1366   }
1367 
1368   static void test_virtual_space_actual_committed_space() {
1369     test_virtual_space_actual_committed_space(4 * K, 0);
1370     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1371     test_virtual_space_actual_committed_space(8 * K, 0);
1372     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1373     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1374     test_virtual_space_actual_committed_space(12 * K, 0);
1375     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1376     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1377     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1378     test_virtual_space_actual_committed_space(64 * K, 0);
1379     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1380     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1381     test_virtual_space_actual_committed_space(2 * M, 0);
1382     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1383     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1384     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1385     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1386     test_virtual_space_actual_committed_space(10 * M, 0);
1387     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1388     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1389     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1390     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1391     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1392     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1393   }
1394 
1395   static void test_virtual_space_disable_large_pages() {
1396     if (!UseLargePages) {
1397       return;
1398     }
1399     // These test cases verify the behavior when we force VirtualSpace to disable large pages.
1400     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1401     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1402     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1403     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1404     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1405     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1406     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1407 
1408     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1409     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1410     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1411     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1412     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1413     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1414     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1415 
1416     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1417     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1418     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1419     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1420     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1421     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1422     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1423   }
1424 
1425   static void test_virtual_space() {
1426     test_virtual_space_actual_committed_space();
1427     test_virtual_space_actual_committed_space_one_large_page();
1428     test_virtual_space_disable_large_pages();
1429   }
1430 };
1431 
1432 void TestVirtualSpace_test() {
1433   TestVirtualSpace::test_virtual_space();
1434 }
1435 
1436 #endif // PRODUCT
1437 
1438 #endif