1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/compressedOops.hpp"
  30 #include "oops/markWord.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/os.inline.hpp"
  33 #include "services/memTracker.hpp"
  34 #include "utilities/align.hpp"
  35 #include "utilities/powerOfTwo.hpp"
  36 
  37 // ReservedSpace
  38 
  39 // Dummy constructor
  40 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  41     _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
  42 }
  43 
  44 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
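
// Illustrative sketch only (kept as a comment, not compiled): how the constructor
// above picks the alignment when a preferred large page size is given. The
// concrete values are assumptions for the example: 2M large pages and a 64K
// allocation granularity.
//
//   size_t size      = 10 * M + 4 * K;   // caller-requested size
//   size_t page_size = 2 * M;            // preferred_page_size
//   size_t alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
//                                        // == 2M, since 2M > 64K
//   size = align_up(size, alignment);    // 10M + 4K rounds up to 12M
//
// Without a preferred page size the alignment stays at the allocation
// granularity, so no memory is wasted on large-page padding.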
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1) {
  66   initialize(size, alignment, large, requested_address, false);
  67 }
  68 
  69 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  70                              bool special, bool executable) : _fd_for_heap(-1) {
  71   assert((size % os::vm_allocation_granularity()) == 0,
  72          "size not allocation aligned");
  73   _base = base;
  74   _size = size;
  75   _alignment = alignment;
  76   _noaccess_prefix = 0;
  77   _special = special;
  78   _executable = executable;
  79 }
  80 
  81 // Helper method
  82 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  83   if (is_file_mapped) {
  84     if (!os::unmap_memory(base, size)) {
  85       fatal("os::unmap_memory failed");
  86     }
  87   } else if (!os::release_memory(base, size)) {
  88     fatal("os::release_memory failed");
  89   }
  90 }
  91 
  92 // Helper method.
  93 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  94                                            const size_t size, bool special, bool is_file_mapped = false)
  95 {
  96   if (base == requested_address || requested_address == NULL)
  97     return false; // did not fail
  98 
  99   if (base != NULL) {
 100     // A different reserve address may be acceptable in other cases,
 101     // but for compressed oops the heap should be at the requested address.
 102     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 103     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 104     // OS ignored requested address. Try different address.
 105     if (special) {
 106       if (!os::release_memory_special(base, size)) {
 107         fatal("os::release_memory_special failed");
 108       }
 109     } else {
 110       unmap_or_release_memory(base, size, is_file_mapped);
 111     }
 112   }
 113   return true;
 114 }
 115 
 116 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 117                                char* requested_address,
 118                                bool executable) {
 119   const size_t granularity = os::vm_allocation_granularity();
 120   assert((size & (granularity - 1)) == 0,
 121          "size not aligned to os::vm_allocation_granularity()");
 122   assert((alignment & (granularity - 1)) == 0,
 123          "alignment not aligned to os::vm_allocation_granularity()");
 124   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 125          "not a power of 2");
 126 
 127   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 128 
 129   _base = NULL;
 130   _size = 0;
 131   _special = false;
 132   _executable = executable;
 133   _alignment = 0;
 134   _noaccess_prefix = 0;
 135   if (size == 0) {
 136     return;
 137   }
 138 
 139   // If the OS doesn't support demand paging for large page memory, we need
 140   // to use reserve_memory_special() to reserve and pin the entire region.
 141   // If there is a backing file directory for this space then whether
 142   // large pages are allocated is up to the filesystem of the backing file.
 143   // So we ignore the UseLargePages flag in this case.
 144   bool special = large && !os::can_commit_large_page_memory();
 145   if (special && _fd_for_heap != -1) {
 146     special = false;
 147     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 148       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 149       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 150     }
 151   }
 152 
 153   char* base = NULL;
 154 
 155   if (special) {
 156 
 157     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 158 
 159     if (base != NULL) {
 160       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 161         // OS ignored requested address. Try different address.
 162         return;
 163       }
 164       // Check alignment constraints.
 165       assert((uintptr_t) base % alignment == 0,
 166              "Large pages returned a non-aligned address, base: "
 167              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 168              p2i(base), alignment);
 169       _special = true;
 170     } else {
 171       // failed; try to reserve regular memory below
 172       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 173                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 174         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 175       }
 176     }
 177   }
 178 
 179   if (base == NULL) {
 180     // Optimistically assume that the OS returns an aligned base pointer.
 181     // When reserving a large address range, most OSes seem to align to at
 182     // least 64K.
 183 
 184     // If the memory was requested at a particular address, use
 185     // os::attempt_reserve_memory_at() to avoid over mapping something
 186     // important.  If available space is not detected, return NULL.
 187 
 188     if (requested_address != 0) {
 189       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 190       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 191         // OS ignored requested address. Try different address.
 192         base = NULL;
 193       }
 194     } else {
 195       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 196     }
 197 
 198     if (base == NULL) return;
 199 
 200     // Check alignment constraints
 201     if ((((size_t)base) & (alignment - 1)) != 0) {
 202       // Base not aligned, retry
 203       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 204 
 205       // Make sure that size is aligned
 206       size = align_up(size, alignment);
 207       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 208 
 209       if (requested_address != 0 &&
 210           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 211         // As a result of the alignment constraints, the allocated base differs
 212         // from the requested address. Return back to the caller who can
 213         // take remedial action (like try again without a requested address).
 214         assert(_base == NULL, "should be");
 215         return;
 216       }
 217     }
 218   }
 219   // Done
 220   _base = base;
 221   _size = size;
 222   _alignment = alignment;
 223   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 224   if (_fd_for_heap != -1) {
 225     _special = true;
 226   }
 227 }
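
// A minimal usage sketch for the code above (illustrative comment only; the
// sizes are assumptions): reserving an aligned, non-large-page space and
// checking the result the way callers typically do.
//
//   size_t ag = os::vm_allocation_granularity();
//   ReservedSpace rs(align_up(64 * M, ag),  // size, allocation-granularity aligned
//                    ag,                    // alignment
//                    false,                 // large: no large pages
//                    NULL);                 // requested_address: let the OS choose
//   if (rs.is_reserved()) {
//     assert(is_aligned(rs.base(), ag), "initialize() retries until aligned");
//     // ... hand rs to a VirtualSpace for committing ...
//     rs.release();
//   }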
 228 
 229 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, bool split) {
 230   assert(partition_size <= size(), "partition failed");
 231   if (split && partition_size > 0 && partition_size < size()) {
 232     os::split_reserved_memory(base(), size(), partition_size);
 233   }
 234   ReservedSpace result(base(), partition_size, alignment, special(),
 235                        executable());
 236   return result;
 237 }
 238 
 239 
 240 ReservedSpace
 241 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 242   assert(partition_size <= size(), "partition failed");
 243   ReservedSpace result(base() + partition_size, size() - partition_size,
 244                        alignment, special(), executable());
 245   return result;
 246 }
 247 
 248 
 249 size_t ReservedSpace::page_align_size_up(size_t size) {
 250   return align_up(size, os::vm_page_size());
 251 }
 252 
 253 
 254 size_t ReservedSpace::page_align_size_down(size_t size) {
 255   return align_down(size, os::vm_page_size());
 256 }
 257 
 258 
 259 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 260   return align_up(size, os::vm_allocation_granularity());
 261 }
 262 
 263 
 264 void ReservedSpace::release() {
 265   if (is_reserved()) {
 266     char *real_base = _base - _noaccess_prefix;
 267     const size_t real_size = _size + _noaccess_prefix;
 268     if (special()) {
 269       if (_fd_for_heap != -1) {
 270         os::unmap_memory(real_base, real_size);
 271       } else {
 272         os::release_memory_special(real_base, real_size);
 273       }
 274     } else{
 275       os::release_memory(real_base, real_size);
 276     }
 277     _base = NULL;
 278     _size = 0;
 279     _noaccess_prefix = 0;
 280     _alignment = 0;
 281     _special = false;
 282     _executable = false;
 283   }
 284 }
 285 
 286 static size_t noaccess_prefix_size(size_t alignment) {
 287   return lcm(os::vm_page_size(), alignment);
 288 }
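
// Illustrative example (assuming a 4K page size): the prefix must be a multiple
// of both the page size and the heap alignment, hence the lcm. For an 8M heap
// alignment, lcm(4K, 8M) == 8M, so one full alignment unit at the bottom of the
// reservation is set aside for the protected prefix.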
 289 
 290 void ReservedHeapSpace::establish_noaccess_prefix() {
 291   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 292   _noaccess_prefix = noaccess_prefix_size(_alignment);
 293 
 294   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 295     if (true
 296         WIN64_ONLY(&& !UseLargePages)
 297         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 298       // Protect memory at the base of the allocated region.
 299       // If special, the page was committed (this only matters on Windows).
 300       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 301         fatal("cannot protect protection page");
 302       }
 303       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 304                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 305                                  p2i(_base),
 306                                  _noaccess_prefix);
 307       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
 308     } else {
 309       CompressedOops::set_use_implicit_null_checks(false);
 310     }
 311   }
 312 
 313   _base += _noaccess_prefix;
 314   _size -= _noaccess_prefix;
 315   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 316 }
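
// Resulting layout, for orientation (illustrative):
//
//   [ noaccess prefix (protected) | usable heap ............................ ]
//   ^ _base - _noaccess_prefix      ^ _base (what callers see as the heap start)
//
// Decoding a null narrow oop yields an address inside the protected prefix, so
// the access faults and can be handled as an implicit null check.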
 317 
 318 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 319 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 320 // might still fulfill the wishes of the caller.
 321 // Assures the memory is aligned to 'alignment'.
 322 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 323 void ReservedHeapSpace::try_reserve_heap(size_t size,
 324                                          size_t alignment,
 325                                          bool large,
 326                                          char* requested_address) {
 327   if (_base != NULL) {
 328     // We tried before, but we didn't like the address delivered.
 329     release();
 330   }
 331 
 332   // If the OS doesn't support demand paging for large page memory, we need
 333   // to use reserve_memory_special() to reserve and pin the entire region.
 334   // If there is a backing file directory for this space then whether
 335   // large pages are allocated is up to the filesystem of the backing file.
 336   // So we ignore the UseLargePages flag in this case.
 337   bool special = large && !os::can_commit_large_page_memory();
 338   if (special && _fd_for_heap != -1) {
 339     special = false;
 340     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 341                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 342       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 343     }
 344   }
 345   char* base = NULL;
 346 
 347   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 348                              " heap of size " SIZE_FORMAT_HEX,
 349                              p2i(requested_address),
 350                              size);
 351 
 352   if (special) {
 353     base = os::reserve_memory_special(size, alignment, requested_address, false);
 354 
 355     if (base != NULL) {
 356       // Check alignment constraints.
 357       assert((uintptr_t) base % alignment == 0,
 358              "Large pages returned a non-aligned address, base: "
 359              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 360              p2i(base), alignment);
 361       _special = true;
 362     }
 363   }
 364 
 365   if (base == NULL) {
 366     // Failed; try to reserve regular memory below
 367     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 368                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 369       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 370     }
 371 
 372     // Optimistically assume that the OS returns an aligned base pointer.
 373     // When reserving a large address range, most OSes seem to align to at
 374     // least 64K.
 375 
 376     // If the memory was requested at a particular address, use
 377     // os::attempt_reserve_memory_at() to avoid over mapping something
 378     // important.  If available space is not detected, return NULL.
 379 
 380     if (requested_address != 0) {
 381       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 382     } else {
 383       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 384     }
 385   }
 386   if (base == NULL) { return; }
 387 
 388   // Done
 389   _base = base;
 390   _size = size;
 391   _alignment = alignment;
 392 
 393   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 394   if (_fd_for_heap != -1) {
 395     _special = true;
 396   }
 397 
 398   // Check alignment constraints
 399   if ((((size_t)base) & (alignment - 1)) != 0) {
 400     // Base not aligned, retry.
 401     release();
 402   }
 403 }
 404 
 405 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 406                                           char *lowest_start,
 407                                           size_t attach_point_alignment,
 408                                           char *aligned_heap_base_min_address,
 409                                           char *upper_bound,
 410                                           size_t size,
 411                                           size_t alignment,
 412                                           bool large) {
 413   const size_t attach_range = highest_start - lowest_start;
 414   // Cap num_attempts at the number of possible attach points.
 415   // At least one attempt is possible even for a zero-sized attach range.
 416   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 417   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 418 
 419   const size_t stepsize = (attach_range == 0) ? // Only one try.
 420     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 421 
 422   // Try attach points from top to bottom.
 423   char* attach_point = highest_start;
 424   while (attach_point >= lowest_start  &&
 425          attach_point <= highest_start &&  // Avoid wrap around.
 426          ((_base == NULL) ||
 427           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 428     try_reserve_heap(size, alignment, large, attach_point);
 429     attach_point -= stepsize;
 430   }
 431 }
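
// Worked example for try_reserve_range() (illustrative; the values are
// assumptions): with lowest_start = 1G, highest_start = 3G, an attach point
// alignment of 256M and HeapSearchSteps = 3:
//
//   attach_range          = 3G - 1G           = 2G
//   num_attempts_possible = 2G / 256M + 1     = 9
//   num_attempts_to_try   = MIN2(3, 9)        = 3
//   stepsize              = align_up(2G / 3, 256M) = 768M
//
// so the attach points 3G, 3G - 768M and 3G - 1536M are tried, top to bottom,
// until a reservation satisfies the caller's placement criteria.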
 432 
 433 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 434 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 435 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 436 
 437 // Helper for heap allocation. Returns an array with addresses
 438 // (OS-specific) that are suited for disjoint base mode. The array
 439 // is NULL terminated.
 440 static char** get_attach_addresses_for_disjoint_mode() {
 441   static uint64_t addresses[] = {
 442      2 * SIZE_32G,
 443      3 * SIZE_32G,
 444      4 * SIZE_32G,
 445      8 * SIZE_32G,
 446     10 * SIZE_32G,
 447      1 * SIZE_64K * SIZE_32G,
 448      2 * SIZE_64K * SIZE_32G,
 449      3 * SIZE_64K * SIZE_32G,
 450      4 * SIZE_64K * SIZE_32G,
 451     16 * SIZE_64K * SIZE_32G,
 452     32 * SIZE_64K * SIZE_32G,
 453     34 * SIZE_64K * SIZE_32G,
 454     0
 455   };
 456 
 457   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 458   // assumes the array is sorted.
 459   uint i = 0;
 460   while (addresses[i] != 0 &&
 461          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 462     i++;
 463   }
 464   uint start = i;
 465 
 466   // Avoid more steps than requested.
 467   i = 0;
 468   while (addresses[start+i] != 0) {
 469     if (i == HeapSearchSteps) {
 470       addresses[start+i] = 0;
 471       break;
 472     }
 473     i++;
 474   }
 475 
 476   return (char**) &addresses[start];
 477 }
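
// Background for the table above (illustrative): in disjoint base mode the heap
// base is a multiple of OopEncodingHeapMax (32G with the default 3-bit shift),
// so the base bits and the shifted narrow-oop bits do not overlap and decoding
// can combine them with an OR instead of an ADD:
//
//   address == base | ((uintptr_t)narrow_oop << LogMinObjAlignmentInBytes)
//
// which is why only 32G-aligned attach addresses are useful here.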
 478 
 479 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 480   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 481             "can not allocate compressed oop heap for this size");
 482   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 483 
 484   const size_t granularity = os::vm_allocation_granularity();
 485   assert((size & (granularity - 1)) == 0,
 486          "size not aligned to os::vm_allocation_granularity()");
 487   assert((alignment & (granularity - 1)) == 0,
 488          "alignment not aligned to os::vm_allocation_granularity()");
 489   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 490          "not a power of 2");
 491 
 492   // The necessary attach point alignment for generated wish addresses.
 493   // This is needed to increase the chance of attaching for mmap and shmat.
 494   const size_t os_attach_point_alignment =
 495     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 496     NOT_AIX(os::vm_allocation_granularity());
 497   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 498 
 499   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 500   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 501     noaccess_prefix_size(alignment) : 0;
 502 
 503   // Attempt to allocate at the user-given address.
 504   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 505     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 506     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 507       release();
 508     }
 509   }
 510 
 511   // Keep heap at HeapBaseMinAddress.
 512   if (_base == NULL) {
 513 
 514     // Try to allocate the heap at addresses that allow efficient oop compression.
 515     // Different schemes are tried, in order of decreasing optimization potential.
 516     //
 517     // For this, try_reserve_heap() is called with the desired heap base addresses.
 518     // A call into the os layer to allocate at a given address can return memory
 519     // at a different address than requested.  Still, this might be memory at a useful
 520     // address. try_reserve_heap() always keeps such allocated memory, as only here
 521     // are the criteria for a good heap checked.
 522 
 523     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 524     // Give it several tries from top of range to bottom.
 525     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 526 
 527       // Calculate the address range within which we try to attach (range of possible start addresses).
 528       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 529       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 530       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 531                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 532     }
 533 
 534     // zerobased: Attempt to allocate in the lower 32G.
 535     // But leave room for the compressed class space, which is allocated above
 536     // the heap.
 537     char *zerobased_max = (char *)OopEncodingHeapMax;
 538     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 539     // For small heaps, save some space for compressed class pointer
 540     // space so it can be decoded with no base.
 541     if (UseCompressedClassPointers && !UseSharedSpaces &&
 542         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 543         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 544       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 545     }
 546 
 547     // Give it several tries from top of range to bottom.
 548     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 549         ((_base == NULL) ||                        // No previous try succeeded.
 550          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 551 
 552       // Calculate the address range within which we try to attach (range of possible start addresses).
 553       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 554       // Need to be careful about size being guaranteed to be less
 555       // than UnscaledOopHeapMax due to type constraints.
 556       char *lowest_start = aligned_heap_base_min_address;
 557       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 558       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 559         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 560       }
 561       lowest_start = align_up(lowest_start, attach_point_alignment);
 562       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 563                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 564     }
 565 
 566     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 567     // implement null checks.
 568     noaccess_prefix = noaccess_prefix_size(alignment);
 569 
 570     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 571     char** addresses = get_attach_addresses_for_disjoint_mode();
 572     int i = 0;
 573     while (addresses[i] &&                                 // End of array not yet reached.
 574            ((_base == NULL) ||                             // No previous try succeeded.
 575             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 576              !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 577       char* const attach_point = addresses[i];
 578       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 579       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 580       i++;
 581     }
 582 
 583     // Last, desperate try without any placement.
 584     if (_base == NULL) {
 585       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 586       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 587     }
 588   }
 589 }
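
// Summary of the placement strategies tried above, in order of decreasing decode
// efficiency (bounds assume the default 3-bit compressed-oop shift):
//
//   unscaled:       heap end <= 4G   -> address == narrow oop
//   zerobased:      heap end <= 32G  -> address == narrow oop << 3
//   disjoint base:  base 32G-aligned -> address == base | (narrow oop << 3)
//   heap based:     anywhere         -> address == base + (narrow oop << 3)
//
// The last two run with a non-zero base and therefore need the noaccess prefix
// so that decoding a null narrow oop still traps (implicit null checks).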
 590 
 591 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 592 
 593   if (size == 0) {
 594     return;
 595   }
 596 
 597   if (heap_allocation_directory != NULL) {
 598     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 599     if (_fd_for_heap == -1) {
 600       vm_exit_during_initialization(
 601         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 602     }
 603   }
 604 
 605   // Heap size should be aligned to alignment, too.
 606   guarantee(is_aligned(size, alignment), "set by caller");
 607 
 608   if (UseCompressedOops) {
 609     initialize_compressed_heap(size, alignment, large);
 610     if (_size > size) {
 611       // We allocated the heap with a noaccess prefix.
 612       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
 613       // if we had to try at an arbitrary address.
 614       establish_noaccess_prefix();
 615     }
 616   } else {
 617     initialize(size, alignment, large, NULL, false);
 618   }
 619 
 620   assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
 621          "area must be distinguishable from marks for mark-sweep");
 622   assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
 623          "area must be distinguishable from marks for mark-sweep");
 624 
 625   if (base() != NULL) {
 626     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 627   }
 628 
 629   if (_fd_for_heap != -1) {
 630     os::close(_fd_for_heap);
 631   }
 632 }
 633 
 634 MemRegion ReservedHeapSpace::region() const {
 635   return MemRegion((HeapWord*)base(), (HeapWord*)end());
 636 }
 637 
 638 // Reserve space for the code segment.  Same as the Java heap, except we mark this as
 639 // executable.
 640 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 641                                      size_t rs_align,
 642                                      bool large) : ReservedSpace() {
 643   initialize(r_size, rs_align, large, /*requested address*/ NULL, /*executable*/ true);
 644   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 645 }
 646 
 647 // VirtualSpace
 648 
 649 VirtualSpace::VirtualSpace() {
 650   _low_boundary           = NULL;
 651   _high_boundary          = NULL;
 652   _low                    = NULL;
 653   _high                   = NULL;
 654   _lower_high             = NULL;
 655   _middle_high            = NULL;
 656   _upper_high             = NULL;
 657   _lower_high_boundary    = NULL;
 658   _middle_high_boundary   = NULL;
 659   _upper_high_boundary    = NULL;
 660   _lower_alignment        = 0;
 661   _middle_alignment       = 0;
 662   _upper_alignment        = 0;
 663   _special                = false;
 664   _executable             = false;
 665 }
 666 
 667 
 668 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 669   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 670   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 671 }
 672 
 673 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 674   if (!rs.is_reserved()) return false;  // Allocation failed.
 675   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 676   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 677 
 678   _low_boundary  = rs.base();
 679   _high_boundary = low_boundary() + rs.size();
 680 
 681   _low = low_boundary();
 682   _high = low();
 683 
 684   _special = rs.special();
 685   _executable = rs.executable();
 686 
 687   // When a VirtualSpace begins life at a large size, make all future expansion
 688   // and shrinking occur aligned to a granularity of large pages.  This avoids
 689   // fragmentation of physical addresses that inhibits the use of large pages
 690   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 691   // page size, the only spaces that get handled this way are codecache and
 692   // the heap itself, both of which provide a substantial performance
 693   // boost in many benchmarks when covered by large pages.
 694   //
 695   // No attempt is made to force large page alignment at the very top and
 696   // bottom of the space if they are not aligned so already.
 697   _lower_alignment  = os::vm_page_size();
 698   _middle_alignment = max_commit_granularity;
 699   _upper_alignment  = os::vm_page_size();
 700 
 701   // End of each region
 702   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 703   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 704   _upper_high_boundary = high_boundary();
 705 
 706   // High address of each region
 707   _lower_high = low_boundary();
 708   _middle_high = lower_high_boundary();
 709   _upper_high = middle_high_boundary();
 710 
 711   // commit to initial size
 712   if (committed_size > 0) {
 713     if (!expand_by(committed_size)) {
 714       return false;
 715     }
 716   }
 717   return true;
 718 }
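
// Resulting region layout, for orientation (illustrative):
//
//   low_boundary()                                             high_boundary()
//   |-- lower region --|---------- middle region ---------|-- upper region --|
//                      ^ lower_high_boundary()            ^ middle_high_boundary()
//
// The lower and upper regions are committed with small pages and absorb the
// unaligned head and tail of the reservation; only the middle region grows and
// shrinks in chunks of max_commit_granularity (large pages when available).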
 719 
 720 
 721 VirtualSpace::~VirtualSpace() {
 722   release();
 723 }
 724 
 725 
 726 void VirtualSpace::release() {
 727   // This does not release memory it reserved.
 728   // Caller must release via rs.release();
 729   _low_boundary           = NULL;
 730   _high_boundary          = NULL;
 731   _low                    = NULL;
 732   _high                   = NULL;
 733   _lower_high             = NULL;
 734   _middle_high            = NULL;
 735   _upper_high             = NULL;
 736   _lower_high_boundary    = NULL;
 737   _middle_high_boundary   = NULL;
 738   _upper_high_boundary    = NULL;
 739   _lower_alignment        = 0;
 740   _middle_alignment       = 0;
 741   _upper_alignment        = 0;
 742   _special                = false;
 743   _executable             = false;
 744 }
 745 
 746 
 747 size_t VirtualSpace::committed_size() const {
 748   return pointer_delta(high(), low(), sizeof(char));
 749 }
 750 
 751 
 752 size_t VirtualSpace::reserved_size() const {
 753   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 754 }
 755 
 756 
 757 size_t VirtualSpace::uncommitted_size()  const {
 758   return reserved_size() - committed_size();
 759 }
 760 
 761 size_t VirtualSpace::actual_committed_size() const {
 762   // Special VirtualSpaces commit all reserved space up front.
 763   if (special()) {
 764     return reserved_size();
 765   }
 766 
 767   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 768   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 769   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 770 
 771 #ifdef ASSERT
 772   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 773   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 774   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 775 
 776   if (committed_high > 0) {
 777     assert(committed_low == lower, "Must be");
 778     assert(committed_middle == middle, "Must be");
 779   }
 780 
 781   if (committed_middle > 0) {
 782     assert(committed_low == lower, "Must be");
 783   }
 784   if (committed_middle < middle) {
 785     assert(committed_high == 0, "Must be");
 786   }
 787 
 788   if (committed_low < lower) {
 789     assert(committed_high == 0, "Must be");
 790     assert(committed_middle == 0, "Must be");
 791   }
 792 #endif
 793 
 794   return committed_low + committed_middle + committed_high;
 795 }
 796 
 797 
 798 bool VirtualSpace::contains(const void* p) const {
 799   return low() <= (const char*) p && (const char*) p < high();
 800 }
 801 
 802 static void pretouch_expanded_memory(void* start, void* end) {
 803   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 804   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 805 
 806   os::pretouch_memory(start, end);
 807 }
 808 
 809 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 810   if (os::commit_memory(start, size, alignment, executable)) {
 811     if (pre_touch || AlwaysPreTouch) {
 812       pretouch_expanded_memory(start, start + size);
 813     }
 814     return true;
 815   }
 816 
 817   debug_only(warning(
 818       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 819       " size=" SIZE_FORMAT ", executable=%d) failed",
 820       p2i(start), p2i(start + size), size, executable);)
 821 
 822   return false;
 823 }
 824 
 825 /*
 826    First we need to determine if a particular virtual space is using large
 827    pages.  This is done in the initialize function and only virtual spaces
 828    that are larger than LargePageSizeInBytes use large pages.  Once we
 829    have determined this, all expand_by and shrink_by calls must grow and
 830    shrink by large page size chunks.  If a particular request
 831    is within the current large page, the call to commit and uncommit memory
 832    can be ignored.  In the case that the low and high boundaries of this
 833    space are not large page aligned, the pages leading to the first large
 834    page address and the pages after the last large page address must be
 835    allocated with default pages.
 836 */
 837 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 838   if (uncommitted_size() < bytes) {
 839     return false;
 840   }
 841 
 842   if (special()) {
 843     // don't commit memory if the entire space is pinned in memory
 844     _high += bytes;
 845     return true;
 846   }
 847 
 848   char* previous_high = high();
 849   char* unaligned_new_high = high() + bytes;
 850   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 851 
 852   // Calculate where the new high for each of the regions should be.  If
 853   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 854   // then the unaligned lower and upper new highs would be the
 855   // lower_high() and upper_high() respectively.
 856   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 857   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 858   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 859 
 860   // Align the new highs based on each region's alignment.  Lower and upper
 861   // alignment will always be the default page size.  Middle alignment will be
 862   // LargePageSizeInBytes if the actual size of the virtual space is in
 863   // fact larger than LargePageSizeInBytes.
 864   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 865   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 866   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 867 
 868   // Determine which regions need to grow in this expand_by call.
 869   // If you are growing in the lower region, high() must be in that
 870   // region so calculate the size based on high().  For the middle and
 871   // upper regions, determine the starting point of growth based on the
 872   // location of high().  By getting the MAX of the region's low address
 873   // (or the previous region's high address) and high(), we can tell if it
 874   // is an intra or inter region growth.
 875   size_t lower_needs = 0;
 876   if (aligned_lower_new_high > lower_high()) {
 877     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 878   }
 879   size_t middle_needs = 0;
 880   if (aligned_middle_new_high > middle_high()) {
 881     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 882   }
 883   size_t upper_needs = 0;
 884   if (aligned_upper_new_high > upper_high()) {
 885     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 886   }
 887 
 888   // Check contiguity.
 889   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 890          "high address must be contained within the region");
 891   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 892          "high address must be contained within the region");
 893   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 894          "high address must be contained within the region");
 895 
 896   // Commit regions
 897   if (lower_needs > 0) {
 898     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 899     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 900       return false;
 901     }
 902     _lower_high += lower_needs;
 903   }
 904 
 905   if (middle_needs > 0) {
 906     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 907     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 908       return false;
 909     }
 910     _middle_high += middle_needs;
 911   }
 912 
 913   if (upper_needs > 0) {
 914     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 915     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 916       return false;
 917     }
 918     _upper_high += upper_needs;
 919   }
 920 
 921   _high += bytes;
 922   return true;
 923 }
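
// Worked example for expand_by() (illustrative; the numbers are assumptions):
// assume a 4K small page size, a 2M middle (large page) granularity, and a
// low_boundary() that sits 1M below the first 2M boundary. Growing a freshly
// initialized space by 2M commits:
//
//   lower region:  lower_needs  == 1M  (head up to the first 2M boundary, 4K pages)
//   middle region: middle_needs == 2M  (the remaining 1M rounded up to one 2M chunk)
//   upper region:  upper_needs  == 0
//
// _high still advances by exactly the requested 2M; the extra 1M committed in
// the middle region is reused by later expansions.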
 924 
 925 // A page is uncommitted if the contents of the entire page are deemed unusable.
 926 // Continue to decrement the high() pointer until it reaches a page boundary,
 927 // at which point that particular page can be uncommitted.
 928 void VirtualSpace::shrink_by(size_t size) {
 929   if (committed_size() < size)
 930     fatal("Cannot shrink virtual space to negative size");
 931 
 932   if (special()) {
 933     // don't uncommit if the entire space is pinned in memory
 934     _high -= size;
 935     return;
 936   }
 937 
 938   char* unaligned_new_high = high() - size;
 939   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 940 
 941   // Calculate new unaligned address
 942   char* unaligned_upper_new_high =
 943     MAX2(unaligned_new_high, middle_high_boundary());
 944   char* unaligned_middle_new_high =
 945     MAX2(unaligned_new_high, lower_high_boundary());
 946   char* unaligned_lower_new_high =
 947     MAX2(unaligned_new_high, low_boundary());
 948 
 949   // Align address to region's alignment
 950   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 951   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 952   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 953 
 954   // Determine which regions need to shrink
 955   size_t upper_needs = 0;
 956   if (aligned_upper_new_high < upper_high()) {
 957     upper_needs =
 958       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 959   }
 960   size_t middle_needs = 0;
 961   if (aligned_middle_new_high < middle_high()) {
 962     middle_needs =
 963       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 964   }
 965   size_t lower_needs = 0;
 966   if (aligned_lower_new_high < lower_high()) {
 967     lower_needs =
 968       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 969   }
 970 
 971   // Check contiguity.
 972   assert(middle_high_boundary() <= upper_high() &&
 973          upper_high() <= upper_high_boundary(),
 974          "high address must be contained within the region");
 975   assert(lower_high_boundary() <= middle_high() &&
 976          middle_high() <= middle_high_boundary(),
 977          "high address must be contained within the region");
 978   assert(low_boundary() <= lower_high() &&
 979          lower_high() <= lower_high_boundary(),
 980          "high address must be contained within the region");
 981 
 982   // Uncommit
 983   if (upper_needs > 0) {
 984     assert(middle_high_boundary() <= aligned_upper_new_high &&
 985            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 986            "must not shrink beyond region");
 987     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 988       debug_only(warning("os::uncommit_memory failed"));
 989       return;
 990     } else {
 991       _upper_high -= upper_needs;
 992     }
 993   }
 994   if (middle_needs > 0) {
 995     assert(lower_high_boundary() <= aligned_middle_new_high &&
 996            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 997            "must not shrink beyond region");
 998     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 999       debug_only(warning("os::uncommit_memory failed"));
1000       return;
1001     } else {
1002       _middle_high -= middle_needs;
1003     }
1004   }
1005   if (lower_needs > 0) {
1006     assert(low_boundary() <= aligned_lower_new_high &&
1007            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1008            "must not shrink beyond region");
1009     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1010       debug_only(warning("os::uncommit_memory failed"));
1011       return;
1012     } else {
1013       _lower_high -= lower_needs;
1014     }
1015   }
1016 
1017   _high -= size;
1018 }
1019 
1020 #ifndef PRODUCT
1021 void VirtualSpace::check_for_contiguity() {
1022   // Check contiguity.
1023   assert(low_boundary() <= lower_high() &&
1024          lower_high() <= lower_high_boundary(),
1025          "high address must be contained within the region");
1026   assert(lower_high_boundary() <= middle_high() &&
1027          middle_high() <= middle_high_boundary(),
1028          "high address must be contained within the region");
1029   assert(middle_high_boundary() <= upper_high() &&
1030          upper_high() <= upper_high_boundary(),
1031          "high address must be contained within the region");
1032   assert(low() >= low_boundary(), "low");
1033   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1034   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1035   assert(high() <= upper_high(), "upper high");
1036 }
1037 
1038 void VirtualSpace::print_on(outputStream* out) {
1039   out->print   ("Virtual space:");
1040   if (special()) out->print(" (pinned in memory)");
1041   out->cr();
1042   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1043   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1044   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1045   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1046 }
1047 
1048 void VirtualSpace::print() {
1049   print_on(tty);
1050 }
1051 
1052 /////////////// Unit tests ///////////////
1053 
1054 #ifndef PRODUCT
1055 
1056 class TestReservedSpace : AllStatic {
1057  public:
1058   static void small_page_write(void* addr, size_t size) {
1059     size_t page_size = os::vm_page_size();
1060 
1061     char* end = (char*)addr + size;
1062     for (char* p = (char*)addr; p < end; p += page_size) {
1063       *p = 1;
1064     }
1065   }
1066 
1067   static void release_memory_for_test(ReservedSpace rs) {
1068     if (rs.special()) {
1069       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1070     } else {
1071       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1072     }
1073   }
1074 
1075   static void test_reserved_space1(size_t size, size_t alignment) {
1076     assert(is_aligned(size, alignment), "Incorrect input parameters");
1077 
1078     ReservedSpace rs(size,          // size
1079                      alignment,     // alignment
1080                      UseLargePages, // large
1081                      (char *)NULL); // requested_address
1082 
1083     assert(rs.base() != NULL, "Must be");
1084     assert(rs.size() == size, "Must be");
1085 
1086     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1087     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1088 
1089     if (rs.special()) {
1090       small_page_write(rs.base(), size);
1091     }
1092 
1093     release_memory_for_test(rs);
1094   }
1095 
1096   static void test_reserved_space2(size_t size) {
1097     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1098 
1099     ReservedSpace rs(size);
1100 
1101     assert(rs.base() != NULL, "Must be");
1102     assert(rs.size() == size, "Must be");
1103 
1104     if (rs.special()) {
1105       small_page_write(rs.base(), size);
1106     }
1107 
1108     release_memory_for_test(rs);
1109   }
1110 
1111   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1112     if (size < alignment) {
1113       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1114       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1115       return;
1116     }
1117 
1118     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1119     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1120 
1121     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1122 
1123     ReservedSpace rs(size, alignment, large);
1124 
1125     assert(rs.base() != NULL, "Must be");
1126     assert(rs.size() == size, "Must be");
1127 
1128     if (rs.special()) {
1129       small_page_write(rs.base(), size);
1130     }
1131 
1132     release_memory_for_test(rs);
1133   }
1134 
1135 
1136   static void test_reserved_space1() {
1137     size_t size = 2 * 1024 * 1024;
1138     size_t ag   = os::vm_allocation_granularity();
1139 
1140     test_reserved_space1(size,      ag);
1141     test_reserved_space1(size * 2,  ag);
1142     test_reserved_space1(size * 10, ag);
1143   }
1144 
1145   static void test_reserved_space2() {
1146     size_t size = 2 * 1024 * 1024;
1147     size_t ag = os::vm_allocation_granularity();
1148 
1149     test_reserved_space2(size * 1);
1150     test_reserved_space2(size * 2);
1151     test_reserved_space2(size * 10);
1152     test_reserved_space2(ag);
1153     test_reserved_space2(size - ag);
1154     test_reserved_space2(size);
1155     test_reserved_space2(size + ag);
1156     test_reserved_space2(size * 2);
1157     test_reserved_space2(size * 2 - ag);
1158     test_reserved_space2(size * 2 + ag);
1159     test_reserved_space2(size * 3);
1160     test_reserved_space2(size * 3 - ag);
1161     test_reserved_space2(size * 3 + ag);
1162     test_reserved_space2(size * 10);
1163     test_reserved_space2(size * 10 + size / 2);
1164   }
1165 
1166   static void test_reserved_space3() {
1167     size_t ag = os::vm_allocation_granularity();
1168 
1169     test_reserved_space3(ag,      ag    , false);
1170     test_reserved_space3(ag * 2,  ag    , false);
1171     test_reserved_space3(ag * 3,  ag    , false);
1172     test_reserved_space3(ag * 2,  ag * 2, false);
1173     test_reserved_space3(ag * 4,  ag * 2, false);
1174     test_reserved_space3(ag * 8,  ag * 2, false);
1175     test_reserved_space3(ag * 4,  ag * 4, false);
1176     test_reserved_space3(ag * 8,  ag * 4, false);
1177     test_reserved_space3(ag * 16, ag * 4, false);
1178 
1179     if (UseLargePages) {
1180       size_t lp = os::large_page_size();
1181 
1182       // Without large pages
1183       test_reserved_space3(lp,     ag * 4, false);
1184       test_reserved_space3(lp * 2, ag * 4, false);
1185       test_reserved_space3(lp * 4, ag * 4, false);
1186       test_reserved_space3(lp,     lp    , false);
1187       test_reserved_space3(lp * 2, lp    , false);
1188       test_reserved_space3(lp * 3, lp    , false);
1189       test_reserved_space3(lp * 2, lp * 2, false);
1190       test_reserved_space3(lp * 4, lp * 2, false);
1191       test_reserved_space3(lp * 8, lp * 2, false);
1192 
1193       // With large pages
1194       test_reserved_space3(lp, ag * 4    , true);
1195       test_reserved_space3(lp * 2, ag * 4, true);
1196       test_reserved_space3(lp * 4, ag * 4, true);
1197       test_reserved_space3(lp, lp        , true);
1198       test_reserved_space3(lp * 2, lp    , true);
1199       test_reserved_space3(lp * 3, lp    , true);
1200       test_reserved_space3(lp * 2, lp * 2, true);
1201       test_reserved_space3(lp * 4, lp * 2, true);
1202       test_reserved_space3(lp * 8, lp * 2, true);
1203     }
1204   }
1205 
1206   static void test_reserved_space() {
1207     test_reserved_space1();
1208     test_reserved_space2();
1209     test_reserved_space3();
1210   }
1211 };
1212 
1213 void TestReservedSpace_test() {
1214   TestReservedSpace::test_reserved_space();
1215 }
1216 
1217 #define assert_equals(actual, expected)  \
1218   assert(actual == expected,             \
1219          "Got " SIZE_FORMAT " expected " \
1220          SIZE_FORMAT, actual, expected);
1221 
1222 #define assert_ge(value1, value2)                  \
1223   assert(value1 >= value2,                         \
1224          "'" #value1 "': " SIZE_FORMAT " '"        \
1225          #value2 "': " SIZE_FORMAT, value1, value2);
1226 
1227 #define assert_lt(value1, value2)                  \
1228   assert(value1 < value2,                          \
1229          "'" #value1 "': " SIZE_FORMAT " '"        \
1230          #value2 "': " SIZE_FORMAT, value1, value2);
1231 
1232 
1233 class TestVirtualSpace : AllStatic {
1234   enum TestLargePages {
1235     Default,
1236     Disable,
1237     Reserve,
1238     Commit
1239   };
1240 
1241   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1242     switch(mode) {
1243     default:
1244     case Default:
1245     case Reserve:
1246       return ReservedSpace(reserve_size_aligned);
1247     case Disable:
1248     case Commit:
1249       return ReservedSpace(reserve_size_aligned,
1250                            os::vm_allocation_granularity(),
1251                            /* large */ false);
1252     }
1253   }
1254 
1255   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1256     switch(mode) {
1257     default:
1258     case Default:
1259     case Reserve:
1260       return vs.initialize(rs, 0);
1261     case Disable:
1262       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1263     case Commit:
1264       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1265     }
1266   }
1267 
1268  public:
1269   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1270                                                         TestLargePages mode = Default) {
1271     size_t granularity = os::vm_allocation_granularity();
1272     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1273 
1274     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1275 
1276     assert(reserved.is_reserved(), "Must be");
1277 
1278     VirtualSpace vs;
1279     bool initialized = initialize_virtual_space(vs, reserved, mode);
1280     assert(initialized, "Failed to initialize VirtualSpace");
1281 
1282     vs.expand_by(commit_size, false);
1283 
1284     if (vs.special()) {
1285       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1286     } else {
1287       assert_ge(vs.actual_committed_size(), commit_size);
1288       // Approximate the commit granularity.
1289       // Make sure that we don't commit using large pages
1290       // if large pages have been disabled for this VirtualSpace.
1291       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1292                                    os::vm_page_size() : os::large_page_size();
1293       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1294     }
1295 
1296     reserved.release();
1297   }
1298 
1299   static void test_virtual_space_actual_committed_space_one_large_page() {
1300     if (!UseLargePages) {
1301       return;
1302     }
1303 
1304     size_t large_page_size = os::large_page_size();
1305 
1306     ReservedSpace reserved(large_page_size, large_page_size, true);
1307 
1308     assert(reserved.is_reserved(), "Must be");
1309 
1310     VirtualSpace vs;
1311     bool initialized = vs.initialize(reserved, 0);
1312     assert(initialized, "Failed to initialize VirtualSpace");
1313 
1314     vs.expand_by(large_page_size, false);
1315 
1316     assert_equals(vs.actual_committed_size(), large_page_size);
1317 
1318     reserved.release();
1319   }
1320 
1321   static void test_virtual_space_actual_committed_space() {
1322     test_virtual_space_actual_committed_space(4 * K, 0);
1323     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1324     test_virtual_space_actual_committed_space(8 * K, 0);
1325     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1326     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1327     test_virtual_space_actual_committed_space(12 * K, 0);
1328     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1329     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1330     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1331     test_virtual_space_actual_committed_space(64 * K, 0);
1332     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1333     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1334     test_virtual_space_actual_committed_space(2 * M, 0);
1335     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1336     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1337     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1338     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1339     test_virtual_space_actual_committed_space(10 * M, 0);
1340     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1341     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1342     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1343     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1344     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1345     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1346   }
1347 
1348   static void test_virtual_space_disable_large_pages() {
1349     if (!UseLargePages) {
1350       return;
1351     }
1352     // These test cases verify that if we force VirtualSpace to disable large pages, committing falls back to small pages; the Reserve and Commit granularity modes are run for comparison.
1353     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1354     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1355     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1356     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1357     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1358     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1359     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1360 
1361     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1362     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1363     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1364     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1365     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1366     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1367     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1368 
1369     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1370     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1371     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1372     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1373     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1374     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1375     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1376   }
1377 
1378   static void test_virtual_space() {
1379     test_virtual_space_actual_committed_space();
1380     test_virtual_space_actual_committed_space_one_large_page();
1381     test_virtual_space_disable_large_pages();
1382   }
1383 };
1384 
1385 void TestVirtualSpace_test() {
1386   TestVirtualSpace::test_virtual_space();
1387 }
1388 
1389 #endif // PRODUCT
1390 
1391 #endif