1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "runtime/os.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 #include "utilities/align.hpp"
  34 
  35 // ReservedSpace
  36 
  37 // Dummy constructor
  38 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  39     _alignment(0), _special(false), _fd_for_heap(-1), _actual_page_size(0), _executable(false) {
  40 }
  41 
  42 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1), _actual_page_size(0) {
  43   bool has_preferred_page_size = preferred_page_size != 0;
  44   // Want to use large pages where possible and pad with small pages.
  45   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  46   bool large_pages = page_size != (size_t)os::vm_page_size();
  47   size_t alignment;
  48   if (large_pages && has_preferred_page_size) {
  49     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  50     // ReservedSpace initialization requires size to be aligned to the given
  51     // alignment. Align the size up.
  52     size = align_up(size, alignment);
  53   } else {
  54     // Don't force the alignment to be large page aligned,
  55     // since that will waste memory.
  56     alignment = os::vm_allocation_granularity();
  57   }
  58   initialize(size, alignment, large_pages, NULL, false);
  59 }
  60 
  61 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  62                              bool large,
  63                              char* requested_address) : _fd_for_heap(-1), _actual_page_size(0) {
  64   initialize(size, alignment, large, requested_address, false);
  65 }
  66 
  67 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  68                              bool large,
  69                              bool executable) : _fd_for_heap(-1), _actual_page_size(0) {
  70   initialize(size, alignment, large, NULL, executable);
  71 }
  72 
  73 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  74                              bool special, bool executable) : _fd_for_heap(-1), _actual_page_size(0) {
  75   assert((size % os::vm_allocation_granularity()) == 0,
  76          "size not allocation aligned");
  77   _base = base;
  78   _size = size;
  79   _alignment = alignment;
  80   _noaccess_prefix = 0;
  81   _special = special;
  82   _executable = executable;
  83 }
  84 
  85 // Helper method
  86 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  87   if (is_file_mapped) {
  88     if (!os::unmap_memory(base, size)) {
  89       fatal("os::unmap_memory failed");
  90     }
  91   } else if (!os::release_memory(base, size)) {
  92     fatal("os::release_memory failed");
  93   }
  94 }
  95 
  96 // Helper method.
  97 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  98                                            const size_t size, bool special, bool is_file_mapped = false)
  99 {
 100   if (base == requested_address || requested_address == NULL)
 101     return false; // did not fail
 102 
 103   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
 106     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 107     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 108     // OS ignored requested address. Try different address.
 109     if (special) {
 110       if (!os::release_memory_special(base, size)) {
 111         fatal("os::release_memory_special failed");
 112       }
 113     } else {
 114       unmap_or_release_memory(base, size, is_file_mapped);
 115     }
 116   }
 117   return true;
 118 }
 119 
 120 // Should be called after _special is decided.
 121 void ReservedSpace::update_actual_page_size(bool large_page) {
  // There are two ways large page memory is managed in ReservedSpace:
  // 1. The OS supports committing large page memory.
  // 2. The OS doesn't support committing large page memory, so ReservedSpace manages it
  //    specially. When such a reservation succeeds, '_special' is set.
 126   if (large_page && (os::can_commit_large_page_memory() || _special)) {
 127     _actual_page_size = os::large_page_size();
 128   } else {
 129     _actual_page_size = os::vm_page_size();
 130   }
 131 }
 132 
 133 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 134                                char* requested_address,
 135                                bool executable) {
 136   const size_t granularity = os::vm_allocation_granularity();
 137   assert((size & (granularity - 1)) == 0,
 138          "size not aligned to os::vm_allocation_granularity()");
 139   assert((alignment & (granularity - 1)) == 0,
 140          "alignment not aligned to os::vm_allocation_granularity()");
 141   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 142          "not a power of 2");
 143 
 144   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 145 
 146   _base = NULL;
 147   _size = 0;
 148   _special = false;
 149   _executable = executable;
 150   _alignment = 0;
 151   _noaccess_prefix = 0;
 152   if (size == 0) {
 153     return;
 154   }
 155 
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space, then whether
  // large pages are allocated is up to the file system of the backing file,
  // so we ignore the UseLargePages flag in this case.
 161   bool special = large && !os::can_commit_large_page_memory();
 162   if (special && _fd_for_heap != -1) {
 163     special = false;
 164     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 165       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 166       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 167     }
 168   }
 169 
 170   char* base = NULL;
 171 
 172   if (special) {
 173 
 174     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 175 
 176     if (base != NULL) {
 177       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored the requested address; return so the caller can retry at a different address.
 179         return;
 180       }
 181       // Check alignment constraints.
 182       assert((uintptr_t) base % alignment == 0,
 183              "Large pages returned a non-aligned address, base: "
 184              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 185              p2i(base), alignment);
 186       _special = true;
 187     } else {
 188       // failed; try to reserve regular memory below
 189       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 190                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 191         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 192       }
 193     }
 194   }
 195 
 196   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
 198     // When reserving a large address range, most OSes seem to align to at
 199     // least 64K.
 200 
    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If the requested space is not available, it returns NULL.
 204 
 205     if (requested_address != 0) {
 206       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 207       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 208         // OS ignored requested address. Try different address.
 209         base = NULL;
 210       }
 211     } else {
 212       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 213     }
 214 
 215     if (base == NULL) return;
 216 
 217     // Check alignment constraints
 218     if ((((size_t)base) & (alignment - 1)) != 0) {
 219       // Base not aligned, retry
 220       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 221 
 222       // Make sure that size is aligned
 223       size = align_up(size, alignment);
 224       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 225 
 226       if (requested_address != 0 &&
 227           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 228         // As a result of the alignment constraints, the allocated base differs
 229         // from the requested address. Return back to the caller who can
 230         // take remedial action (like try again without a requested address).
 231         assert(_base == NULL, "should be");
 232         return;
 233       }
 234     }
 235   }
 236   // Done
 237   _base = base;
 238   _size = size;
 239   _alignment = alignment;
  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 241   if (_fd_for_heap != -1) {
 242     _special = true;
 243   }
 244 
 245   update_actual_page_size(large);
 246 }
 247 
 248 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 249                                         bool split, bool realloc) {
 250   assert(partition_size <= size(), "partition failed");
 251   if (split) {
 252     os::split_reserved_memory(base(), size(), partition_size, realloc);
 253   }
 254   ReservedSpace result(base(), partition_size, alignment, special(),
 255                        executable());
 256   return result;
 257 }
 258 
 259 
 260 ReservedSpace
 261 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 262   assert(partition_size <= size(), "partition failed");
 263   ReservedSpace result(base() + partition_size, size() - partition_size,
 264                        alignment, special(), executable());
 265   return result;
 266 }
 267 
 268 
 269 size_t ReservedSpace::page_align_size_up(size_t size) {
 270   return align_up(size, os::vm_page_size());
 271 }
 272 
 273 
 274 size_t ReservedSpace::page_align_size_down(size_t size) {
 275   return align_down(size, os::vm_page_size());
 276 }
 277 
 278 
 279 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 280   return align_up(size, os::vm_allocation_granularity());
 281 }
 282 
 283 
 284 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 285   return align_down(size, os::vm_allocation_granularity());
 286 }
 287 
 288 
 289 void ReservedSpace::release() {
 290   if (is_reserved()) {
 291     char *real_base = _base - _noaccess_prefix;
 292     const size_t real_size = _size + _noaccess_prefix;
 293     if (special()) {
 294       if (_fd_for_heap != -1) {
 295         os::unmap_memory(real_base, real_size);
 296       } else {
 297         os::release_memory_special(real_base, real_size);
 298       }
    } else {
 300       os::release_memory(real_base, real_size);
 301     }
 302     _base = NULL;
 303     _size = 0;
 304     _noaccess_prefix = 0;
 305     _alignment = 0;
 306     _special = false;
 307     _executable = false;
 308     _actual_page_size = 0;
 309   }
 310 }
 311 
 312 static size_t noaccess_prefix_size(size_t alignment) {
 313   return lcm(os::vm_page_size(), alignment);
 314 }
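// Worked example (values are illustrative, not guarantees): with a 4K vm page
// size and a heap alignment of 1M,
//
//   noaccess_prefix_size(1*M) == lcm(4*K, 1*M) == 1*M
//
// Using the lcm keeps the prefix page aligned (so it can be protected) and
// alignment aligned (so the visible heap base stays properly aligned).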
 315 
 316 void ReservedHeapSpace::establish_noaccess_prefix() {
 317   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 318   _noaccess_prefix = noaccess_prefix_size(_alignment);
 319 
 320   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 321     if (true
 322         WIN64_ONLY(&& !UseLargePages)
 323         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 324       // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on Windows).
 326       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 327         fatal("cannot protect protection page");
 328       }
 329       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 330                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 331                                  p2i(_base),
 332                                  _noaccess_prefix);
 333       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 334     } else {
 335       Universe::set_narrow_oop_use_implicit_null_checks(false);
 336     }
 337   }
 338 
 339   _base += _noaccess_prefix;
 340   _size -= _noaccess_prefix;
 341   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 342 }
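// Sketch of the bookkeeping performed above (addresses and sizes are purely
// illustrative): with a 1M noaccess prefix the original reservation is split as
//
//   protected prefix: [old _base, old _base + 1M)              any access traps here
//   visible heap:     [old _base + 1M, old _base + old _size)  the new _base/_size
//
// With a non-zero narrow-oop base, decoding a NULL narrow oop lands in the
// protected prefix, so the hardware trap can stand in for an explicit
// compressed-oop NULL check.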
 343 
// Tries to allocate memory of size 'size' at address 'requested_address' with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Ensures the memory is aligned to 'alignment'.
// NOTE: If the ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 349 void ReservedHeapSpace::try_reserve_heap(size_t size,
 350                                          size_t alignment,
 351                                          bool large,
 352                                          char* requested_address) {
 353   if (_base != NULL) {
 354     // We tried before, but we didn't like the address delivered.
 355     release();
 356   }
 357 
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space, then whether
  // large pages are allocated is up to the file system of the backing file,
  // so we ignore the UseLargePages flag in this case.
 363   bool special = large && !os::can_commit_large_page_memory();
 364   if (special && _fd_for_heap != -1) {
 365     special = false;
 366     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 367                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 368       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 369     }
 370   }
 371   char* base = NULL;
 372 
 373   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 374                              " heap of size " SIZE_FORMAT_HEX,
 375                              p2i(requested_address),
 376                              size);
 377 
 378   if (special) {
 379     base = os::reserve_memory_special(size, alignment, requested_address, false);
 380 
 381     if (base != NULL) {
 382       // Check alignment constraints.
 383       assert((uintptr_t) base % alignment == 0,
 384              "Large pages returned a non-aligned address, base: "
 385              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 386              p2i(base), alignment);
 387       _special = true;
 388     }
 389   }
 390 
 391   if (base == NULL) {
 392     // Failed; try to reserve regular memory below
 393     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 394                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 395       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 396     }
 397 
    // Optimistically assume that the OS returns an aligned base pointer.
 399     // When reserving a large address range, most OSes seem to align to at
 400     // least 64K.
 401 
    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid overmapping something
    // important.  If the requested space is not available, it returns NULL.
 405 
 406     if (requested_address != 0) {
 407       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 408     } else {
 409       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 410     }
 411   }
 412   if (base == NULL) { return; }
 413 
 414   // Done
 415   _base = base;
 416   _size = size;
 417   _alignment = alignment;
  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 419   if (_fd_for_heap != -1) {
 420     _special = true;
 421   }
 422   update_actual_page_size(large);
 423 
 424   // Check alignment constraints
 425   if ((((size_t)base) & (alignment - 1)) != 0) {
 426     // Base not aligned, retry.
 427     release();
 428   }
 429 }
 430 
 431 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 432                                           char *lowest_start,
 433                                           size_t attach_point_alignment,
 434                                           char *aligned_heap_base_min_address,
 435                                           char *upper_bound,
 436                                           size_t size,
 437                                           size_t alignment,
 438                                           bool large) {
 439   const size_t attach_range = highest_start - lowest_start;
  // Cap the number of attempts at what is actually possible.
  // At least one attempt is possible even for a zero-sized attach range.
 442   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 443   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 444 
 445   const size_t stepsize = (attach_range == 0) ? // Only one try.
 446     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 447 
 448   // Try attach points from top to bottom.
 449   char* attach_point = highest_start;
 450   while (attach_point >= lowest_start  &&
 451          attach_point <= highest_start &&  // Avoid wrap around.
 452          ((_base == NULL) ||
 453           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 454     try_reserve_heap(size, alignment, large, attach_point);
 455     attach_point -= stepsize;
 456   }
 457 }
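// Illustrative walk-through (example numbers only): with an attach range of 1G,
// an attach point alignment of 4M and HeapSearchSteps=3,
//
//   num_attempts_possible = 1G / 4M + 1 = 257
//   num_attempts_to_try   = MIN2(3, 257) = 3
//   stepsize              = align_up(1G / 3, 4M) = 344M
//
// so the loop probes highest_start, highest_start - 344M and
// highest_start - 688M, stopping earlier if a suitable _base was found.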
 458 
 459 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 460 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 461 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 462 
 463 // Helper for heap allocation. Returns an array with addresses
 464 // (OS-specific) which are suited for disjoint base mode. Array is
 465 // NULL terminated.
 466 static char** get_attach_addresses_for_disjoint_mode() {
 467   static uint64_t addresses[] = {
 468      2 * SIZE_32G,
 469      3 * SIZE_32G,
 470      4 * SIZE_32G,
 471      8 * SIZE_32G,
 472     10 * SIZE_32G,
 473      1 * SIZE_64K * SIZE_32G,
 474      2 * SIZE_64K * SIZE_32G,
 475      3 * SIZE_64K * SIZE_32G,
 476      4 * SIZE_64K * SIZE_32G,
 477     16 * SIZE_64K * SIZE_32G,
 478     32 * SIZE_64K * SIZE_32G,
 479     34 * SIZE_64K * SIZE_32G,
 480     0
 481   };
 482 
  // Skip addresses smaller than OopEncodingHeapMax or HeapBaseMinAddress.
  // This assumes the array is sorted in ascending order.
 485   uint i = 0;
 486   while (addresses[i] != 0 &&
 487          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 488     i++;
 489   }
 490   uint start = i;
 491 
 492   // Avoid more steps than requested.
 493   i = 0;
 494   while (addresses[start+i] != 0) {
 495     if (i == HeapSearchSteps) {
 496       addresses[start+i] = 0;
 497       break;
 498     }
 499     i++;
 500   }
 501 
 502   return (char**) &addresses[start];
 503 }
 504 
 505 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 506   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
 508   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 509 
 510   const size_t granularity = os::vm_allocation_granularity();
 511   assert((size & (granularity - 1)) == 0,
 512          "size not aligned to os::vm_allocation_granularity()");
 513   assert((alignment & (granularity - 1)) == 0,
 514          "alignment not aligned to os::vm_allocation_granularity()");
 515   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 516          "not a power of 2");
 517 
 518   // The necessary attach point alignment for generated wish addresses.
 519   // This is needed to increase the chance of attaching for mmap and shmat.
 520   const size_t os_attach_point_alignment =
 521     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 522     NOT_AIX(os::vm_allocation_granularity());
 523   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 524 
 525   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 526   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 527     noaccess_prefix_size(alignment) : 0;
 528 
 529   // Attempt to alloc at user-given address.
 530   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 531     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 532     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 533       release();
 534     }
 535   }
 536 
 537   // Keep heap at HeapBaseMinAddress.
 538   if (_base == NULL) {
 539 
 540     // Try to allocate the heap at addresses that allow efficient oop compression.
 541     // Different schemes are tried, in order of decreasing optimization potential.
 542     //
 543     // For this, try_reserve_heap() is called with the desired heap base addresses.
 544     // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always keeps the reserved memory, since only here,
    // in the caller, are the criteria for a good heap checked.
 548 
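    // For orientation, the narrow-oop decode the schemes below optimize for is
    // roughly (a sketch assuming the default 8-byte object alignment, not the
    // exact generated code):
    //
    //   unscaled   (heap end <= 4G):   oop = narrow_oop
    //   zerobased  (heap end <= 32G):  oop = narrow_oop << LogMinObjAlignmentInBytes
    //   disjoint/heap-based:           oop = base + (narrow_oop << LogMinObjAlignmentInBytes)
    //
    // which is why attach points are tried from the cheapest mode downwards.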
 549     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 550     // Give it several tries from top of range to bottom.
 551     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 552 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 554       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 555       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 556       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 557                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 558     }
 559 
 560     // zerobased: Attempt to allocate in the lower 32G.
 561     // But leave room for the compressed class pointers, which is allocated above
 562     // the heap.
 563     char *zerobased_max = (char *)OopEncodingHeapMax;
 564     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 565     // For small heaps, save some space for compressed class pointer
 566     // space so it can be decoded with no base.
 567     if (UseCompressedClassPointers && !UseSharedSpaces &&
 568         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 569         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 570       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 571     }
 572 
 573     // Give it several tries from top of range to bottom.
 574     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
 575         ((_base == NULL) ||                        // No previous try succeeded.
 576          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 577 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 579       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 580       // Need to be careful about size being guaranteed to be less
 581       // than UnscaledOopHeapMax due to type constraints.
 582       char *lowest_start = aligned_heap_base_min_address;
 583       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 584       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 585         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 586       }
 587       lowest_start = align_up(lowest_start, attach_point_alignment);
 588       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 589                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 590     }
 591 
 592     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 593     // implement null checks.
 594     noaccess_prefix = noaccess_prefix_size(alignment);
 595 
    // Try to attach at addresses that are aligned to OopEncodingHeapMax (disjoint base mode).
 597     char** addresses = get_attach_addresses_for_disjoint_mode();
 598     int i = 0;
 599     while (addresses[i] &&                                 // End of array not yet reached.
 600            ((_base == NULL) ||                             // No previous try succeeded.
 601             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 602              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 603       char* const attach_point = addresses[i];
 604       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 605       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 606       i++;
 607     }
 608 
 609     // Last, desperate try without any placement.
 610     if (_base == NULL) {
 611       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 612       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 613     }
 614   }
 615 }
 616 
 617 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 618 
 619   if (size == 0) {
 620     return;
 621   }
 622 
 623   if (heap_allocation_directory != NULL) {
 624     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 625     if (_fd_for_heap == -1) {
 626       vm_exit_during_initialization(
 627         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 628     }
 629   }
 630 
 631   // Heap size should be aligned to alignment, too.
 632   guarantee(is_aligned(size, alignment), "set by caller");
 633 
 634   if (UseCompressedOops) {
 635     initialize_compressed_heap(size, alignment, large);
 636     if (_size > size) {
      // We allocated the heap with a noaccess prefix.
      // It can happen that we get a zero-based/unscaled heap with a noaccess prefix
      // if we had to try at an arbitrary address.
 640       establish_noaccess_prefix();
 641     }
 642   } else {
 643     initialize(size, alignment, large, NULL, false);
 644   }
 645 
 646   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 647          "area must be distinguishable from marks for mark-sweep");
 648   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 649          "area must be distinguishable from marks for mark-sweep");
 650 
 651   if (base() != NULL) {
 652     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 653   }
 654 
 655   if (_fd_for_heap != -1) {
 656     os::close(_fd_for_heap);
 657   }
 658 }
 659 
// Reserve space for the code segment.  Same as the Java heap, except that we
// mark this space as executable.
 662 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 663                                      size_t rs_align,
 664                                      bool large) :
 665   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 666   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 667 }
 668 
 669 // VirtualSpace
 670 
 671 VirtualSpace::VirtualSpace() {
 672   _low_boundary           = NULL;
 673   _high_boundary          = NULL;
 674   _low                    = NULL;
 675   _high                   = NULL;
 676   _lower_high             = NULL;
 677   _middle_high            = NULL;
 678   _upper_high             = NULL;
 679   _lower_high_boundary    = NULL;
 680   _middle_high_boundary   = NULL;
 681   _upper_high_boundary    = NULL;
 682   _lower_alignment        = 0;
 683   _middle_alignment       = 0;
 684   _upper_alignment        = 0;
 685   _special                = false;
 686   _executable             = false;
 687 }
 688 
 689 
 690 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 691   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 692   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 693 }
 694 
 695 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
 697   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 698   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 699 
 700   _low_boundary  = rs.base();
 701   _high_boundary = low_boundary() + rs.size();
 702 
 703   _low = low_boundary();
 704   _high = low();
 705 
 706   _special = rs.special();
 707   _executable = rs.executable();
 708 
 709   // When a VirtualSpace begins life at a large size, make all future expansion
 710   // and shrinking occur aligned to a granularity of large pages.  This avoids
 711   // fragmentation of physical addresses that inhibits the use of large pages
 712   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 713   // page size, the only spaces that get handled this way are codecache and
 714   // the heap itself, both of which provide a substantial performance
 715   // boost in many benchmarks when covered by large pages.
 716   //
 717   // No attempt is made to force large page alignment at the very top and
 718   // bottom of the space if they are not aligned so already.
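  // Illustrative split, assuming a 4K small page size and a 2M commit
  // granularity (the boundaries below are examples, not guarantees):
  //
  //   lower  region: [low_boundary,      first 2M boundary)  committed with 4K pages
  //   middle region: [first 2M boundary, last 2M boundary)   committed at 2M granularity
  //   upper  region: [last 2M boundary,  high_boundary)      committed with 4K pages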
 719   _lower_alignment  = os::vm_page_size();
 720   _middle_alignment = max_commit_granularity;
 721   _upper_alignment  = os::vm_page_size();
 722 
 723   // End of each region
 724   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 725   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 726   _upper_high_boundary = high_boundary();
 727 
 728   // High address of each region
 729   _lower_high = low_boundary();
 730   _middle_high = lower_high_boundary();
 731   _upper_high = middle_high_boundary();
 732 
 733   // commit to initial size
 734   if (committed_size > 0) {
 735     if (!expand_by(committed_size)) {
 736       return false;
 737     }
 738   }
 739   return true;
 740 }
 741 
 742 
 743 VirtualSpace::~VirtualSpace() {
 744   release();
 745 }
 746 
 747 
 748 void VirtualSpace::release() {
  // This does not release the underlying reserved memory.
  // The caller must release it via rs.release().
 751   _low_boundary           = NULL;
 752   _high_boundary          = NULL;
 753   _low                    = NULL;
 754   _high                   = NULL;
 755   _lower_high             = NULL;
 756   _middle_high            = NULL;
 757   _upper_high             = NULL;
 758   _lower_high_boundary    = NULL;
 759   _middle_high_boundary   = NULL;
 760   _upper_high_boundary    = NULL;
 761   _lower_alignment        = 0;
 762   _middle_alignment       = 0;
 763   _upper_alignment        = 0;
 764   _special                = false;
 765   _executable             = false;
 766 }
 767 
 768 
 769 size_t VirtualSpace::committed_size() const {
 770   return pointer_delta(high(), low(), sizeof(char));
 771 }
 772 
 773 
 774 size_t VirtualSpace::reserved_size() const {
 775   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 776 }
 777 
 778 
 779 size_t VirtualSpace::uncommitted_size()  const {
 780   return reserved_size() - committed_size();
 781 }
 782 
 783 size_t VirtualSpace::actual_committed_size() const {
 784   // Special VirtualSpaces commit all reserved space up front.
 785   if (special()) {
 786     return reserved_size();
 787   }
 788 
 789   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 790   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 791   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 792 
 793 #ifdef ASSERT
 794   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 795   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 796   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 797 
 798   if (committed_high > 0) {
 799     assert(committed_low == lower, "Must be");
 800     assert(committed_middle == middle, "Must be");
 801   }
 802 
 803   if (committed_middle > 0) {
 804     assert(committed_low == lower, "Must be");
 805   }
 806   if (committed_middle < middle) {
 807     assert(committed_high == 0, "Must be");
 808   }
 809 
 810   if (committed_low < lower) {
 811     assert(committed_high == 0, "Must be");
 812     assert(committed_middle == 0, "Must be");
 813   }
 814 #endif
 815 
 816   return committed_low + committed_middle + committed_high;
 817 }
 818 
 819 
 820 bool VirtualSpace::contains(const void* p) const {
 821   return low() <= (const char*) p && (const char*) p < high();
 822 }
 823 
 824 static void pretouch_expanded_memory(void* start, void* end) {
 825   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 826   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 827 
 828   os::pretouch_memory(start, end);
 829 }
 830 
 831 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 832   if (os::commit_memory(start, size, alignment, executable)) {
 833     if (pre_touch || AlwaysPreTouch) {
 834       pretouch_expanded_memory(start, start + size);
 835     }
 836     return true;
 837   }
 838 
 839   debug_only(warning(
 840       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 841       " size=" SIZE_FORMAT ", executable=%d) failed",
 842       p2i(start), p2i(start + size), size, executable);)
 843 
 844   return false;
 845 }
 846 
/*
   First we need to determine whether a particular virtual space is using large
   pages.  This is done in the initialize function, and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large-page-size chunks.  If a particular request
   is within the current large page, the calls to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default (small) pages.
*/
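// Hedged example of how one expand_by() request is split across the regions
// (sizes are illustrative only): if high() currently sits inside the lower
// region and we expand by 5M with a 4K/2M/4K alignment split, the lower region
// is first committed up to its 2M boundary with 4K pages, the bulk is then
// committed in the middle region in 2M-aligned chunks, and any remainder past
// the last 2M boundary is committed in the upper region with 4K pages again.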
 859 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 860   if (uncommitted_size() < bytes) {
 861     return false;
 862   }
 863 
 864   if (special()) {
 865     // don't commit memory if the entire space is pinned in memory
 866     _high += bytes;
 867     return true;
 868   }
 869 
 870   char* previous_high = high();
 871   char* unaligned_new_high = high() + bytes;
 872   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 873 
 874   // Calculate where the new high for each of the regions should be.  If
 875   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 876   // then the unaligned lower and upper new highs would be the
 877   // lower_high() and upper_high() respectively.
 878   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 879   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 880   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 881 
  // Align the new highs based on each region's alignment.  The lower and upper
  // alignment will always be the default page size.  The middle alignment will
  // be LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
 886   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 887   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 888   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 889 
 890   // Determine which regions need to grow in this expand_by call.
 891   // If you are growing in the lower region, high() must be in that
 892   // region so calculate the size based on high().  For the middle and
 893   // upper regions, determine the starting point of growth based on the
 894   // location of high().  By getting the MAX of the region's low address
 895   // (or the previous region's high address) and high(), we can tell if it
 896   // is an intra or inter region growth.
 897   size_t lower_needs = 0;
 898   if (aligned_lower_new_high > lower_high()) {
 899     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 900   }
 901   size_t middle_needs = 0;
 902   if (aligned_middle_new_high > middle_high()) {
 903     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 904   }
 905   size_t upper_needs = 0;
 906   if (aligned_upper_new_high > upper_high()) {
 907     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 908   }
 909 
 910   // Check contiguity.
 911   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 912          "high address must be contained within the region");
 913   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 914          "high address must be contained within the region");
 915   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 916          "high address must be contained within the region");
 917 
 918   // Commit regions
 919   if (lower_needs > 0) {
 920     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 921     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 922       return false;
 923     }
 924     _lower_high += lower_needs;
 925   }
 926 
 927   if (middle_needs > 0) {
 928     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 929     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 930       return false;
 931     }
 932     _middle_high += middle_needs;
 933   }
 934 
 935   if (upper_needs > 0) {
 936     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 937     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 938       return false;
 939     }
 940     _upper_high += upper_needs;
 941   }
 942 
 943   _high += bytes;
 944   return true;
 945 }
 946 
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary, at
// which point that particular page can be uncommitted.
 950 void VirtualSpace::shrink_by(size_t size) {
 951   if (committed_size() < size)
 952     fatal("Cannot shrink virtual space to negative size");
 953 
 954   if (special()) {
 955     // don't uncommit if the entire space is pinned in memory
 956     _high -= size;
 957     return;
 958   }
 959 
 960   char* unaligned_new_high = high() - size;
 961   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 962 
 963   // Calculate new unaligned address
 964   char* unaligned_upper_new_high =
 965     MAX2(unaligned_new_high, middle_high_boundary());
 966   char* unaligned_middle_new_high =
 967     MAX2(unaligned_new_high, lower_high_boundary());
 968   char* unaligned_lower_new_high =
 969     MAX2(unaligned_new_high, low_boundary());
 970 
 971   // Align address to region's alignment
 972   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 973   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 974   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 975 
 976   // Determine which regions need to shrink
 977   size_t upper_needs = 0;
 978   if (aligned_upper_new_high < upper_high()) {
 979     upper_needs =
 980       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 981   }
 982   size_t middle_needs = 0;
 983   if (aligned_middle_new_high < middle_high()) {
 984     middle_needs =
 985       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 986   }
 987   size_t lower_needs = 0;
 988   if (aligned_lower_new_high < lower_high()) {
 989     lower_needs =
 990       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 991   }
 992 
 993   // Check contiguity.
 994   assert(middle_high_boundary() <= upper_high() &&
 995          upper_high() <= upper_high_boundary(),
 996          "high address must be contained within the region");
 997   assert(lower_high_boundary() <= middle_high() &&
 998          middle_high() <= middle_high_boundary(),
 999          "high address must be contained within the region");
1000   assert(low_boundary() <= lower_high() &&
1001          lower_high() <= lower_high_boundary(),
1002          "high address must be contained within the region");
1003 
1004   // Uncommit
1005   if (upper_needs > 0) {
1006     assert(middle_high_boundary() <= aligned_upper_new_high &&
1007            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
1008            "must not shrink beyond region");
1009     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
1010       debug_only(warning("os::uncommit_memory failed"));
1011       return;
1012     } else {
1013       _upper_high -= upper_needs;
1014     }
1015   }
1016   if (middle_needs > 0) {
1017     assert(lower_high_boundary() <= aligned_middle_new_high &&
1018            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1019            "must not shrink beyond region");
1020     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1021       debug_only(warning("os::uncommit_memory failed"));
1022       return;
1023     } else {
1024       _middle_high -= middle_needs;
1025     }
1026   }
1027   if (lower_needs > 0) {
1028     assert(low_boundary() <= aligned_lower_new_high &&
1029            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1030            "must not shrink beyond region");
1031     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1032       debug_only(warning("os::uncommit_memory failed"));
1033       return;
1034     } else {
1035       _lower_high -= lower_needs;
1036     }
1037   }
1038 
1039   _high -= size;
1040 }
1041 
1042 #ifndef PRODUCT
1043 void VirtualSpace::check_for_contiguity() {
1044   // Check contiguity.
1045   assert(low_boundary() <= lower_high() &&
1046          lower_high() <= lower_high_boundary(),
1047          "high address must be contained within the region");
1048   assert(lower_high_boundary() <= middle_high() &&
1049          middle_high() <= middle_high_boundary(),
1050          "high address must be contained within the region");
1051   assert(middle_high_boundary() <= upper_high() &&
1052          upper_high() <= upper_high_boundary(),
1053          "high address must be contained within the region");
1054   assert(low() >= low_boundary(), "low");
1055   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1056   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1057   assert(high() <= upper_high(), "upper high");
1058 }
1059 
1060 void VirtualSpace::print_on(outputStream* out) {
1061   out->print   ("Virtual space:");
1062   if (special()) out->print(" (pinned in memory)");
1063   out->cr();
1064   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1065   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1066   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1067   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1068 }
1069 
1070 void VirtualSpace::print() {
1071   print_on(tty);
1072 }
1073 
1074 /////////////// Unit tests ///////////////
1075 
1076 #ifndef PRODUCT
1077 
1078 class TestReservedSpace : AllStatic {
1079  public:
1080   static void small_page_write(void* addr, size_t size) {
1081     size_t page_size = os::vm_page_size();
1082 
1083     char* end = (char*)addr + size;
1084     for (char* p = (char*)addr; p < end; p += page_size) {
1085       *p = 1;
1086     }
1087   }
1088 
1089   static void release_memory_for_test(ReservedSpace rs) {
1090     if (rs.special()) {
1091       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1092     } else {
1093       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1094     }
1095   }
1096 
1097   static void test_reserved_space1(size_t size, size_t alignment) {
1098     assert(is_aligned(size, alignment), "Incorrect input parameters");
1099 
1100     ReservedSpace rs(size,          // size
1101                      alignment,     // alignment
1102                      UseLargePages, // large
1103                      (char *)NULL); // requested_address
1104 
1105     assert(rs.base() != NULL, "Must be");
1106     assert(rs.size() == size, "Must be");
1107 
1108     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1109     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1110 
1111     if (rs.special()) {
1112       small_page_write(rs.base(), size);
1113     }
1114 
1115     release_memory_for_test(rs);
1116   }
1117 
1118   static void test_reserved_space2(size_t size) {
1119     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1120 
1121     ReservedSpace rs(size);
1122 
1123     assert(rs.base() != NULL, "Must be");
1124     assert(rs.size() == size, "Must be");
1125 
1126     if (rs.special()) {
1127       small_page_write(rs.base(), size);
1128     }
1129 
1130     release_memory_for_test(rs);
1131   }
1132 
1133   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1134     if (size < alignment) {
1135       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1136       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1137       return;
1138     }
1139 
1140     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1141     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1142 
1143     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1144 
1145     ReservedSpace rs(size, alignment, large, false);
1146 
1147     assert(rs.base() != NULL, "Must be");
1148     assert(rs.size() == size, "Must be");
1149 
1150     if (rs.special()) {
1151       small_page_write(rs.base(), size);
1152     }
1153 
1154     release_memory_for_test(rs);
1155   }
1156 
1157 
1158   static void test_reserved_space1() {
1159     size_t size = 2 * 1024 * 1024;
1160     size_t ag   = os::vm_allocation_granularity();
1161 
1162     test_reserved_space1(size,      ag);
1163     test_reserved_space1(size * 2,  ag);
1164     test_reserved_space1(size * 10, ag);
1165   }
1166 
1167   static void test_reserved_space2() {
1168     size_t size = 2 * 1024 * 1024;
1169     size_t ag = os::vm_allocation_granularity();
1170 
1171     test_reserved_space2(size * 1);
1172     test_reserved_space2(size * 2);
1173     test_reserved_space2(size * 10);
1174     test_reserved_space2(ag);
1175     test_reserved_space2(size - ag);
1176     test_reserved_space2(size);
1177     test_reserved_space2(size + ag);
1178     test_reserved_space2(size * 2);
1179     test_reserved_space2(size * 2 - ag);
1180     test_reserved_space2(size * 2 + ag);
1181     test_reserved_space2(size * 3);
1182     test_reserved_space2(size * 3 - ag);
1183     test_reserved_space2(size * 3 + ag);
1184     test_reserved_space2(size * 10);
1185     test_reserved_space2(size * 10 + size / 2);
1186   }
1187 
1188   static void test_reserved_space3() {
1189     size_t ag = os::vm_allocation_granularity();
1190 
1191     test_reserved_space3(ag,      ag    , false);
1192     test_reserved_space3(ag * 2,  ag    , false);
1193     test_reserved_space3(ag * 3,  ag    , false);
1194     test_reserved_space3(ag * 2,  ag * 2, false);
1195     test_reserved_space3(ag * 4,  ag * 2, false);
1196     test_reserved_space3(ag * 8,  ag * 2, false);
1197     test_reserved_space3(ag * 4,  ag * 4, false);
1198     test_reserved_space3(ag * 8,  ag * 4, false);
1199     test_reserved_space3(ag * 16, ag * 4, false);
1200 
1201     if (UseLargePages) {
1202       size_t lp = os::large_page_size();
1203 
1204       // Without large pages
1205       test_reserved_space3(lp,     ag * 4, false);
1206       test_reserved_space3(lp * 2, ag * 4, false);
1207       test_reserved_space3(lp * 4, ag * 4, false);
1208       test_reserved_space3(lp,     lp    , false);
1209       test_reserved_space3(lp * 2, lp    , false);
1210       test_reserved_space3(lp * 3, lp    , false);
1211       test_reserved_space3(lp * 2, lp * 2, false);
1212       test_reserved_space3(lp * 4, lp * 2, false);
1213       test_reserved_space3(lp * 8, lp * 2, false);
1214 
1215       // With large pages
1216       test_reserved_space3(lp, ag * 4    , true);
1217       test_reserved_space3(lp * 2, ag * 4, true);
1218       test_reserved_space3(lp * 4, ag * 4, true);
1219       test_reserved_space3(lp, lp        , true);
1220       test_reserved_space3(lp * 2, lp    , true);
1221       test_reserved_space3(lp * 3, lp    , true);
1222       test_reserved_space3(lp * 2, lp * 2, true);
1223       test_reserved_space3(lp * 4, lp * 2, true);
1224       test_reserved_space3(lp * 8, lp * 2, true);
1225     }
1226   }
1227 
1228   static void test_reserved_space() {
1229     test_reserved_space1();
1230     test_reserved_space2();
1231     test_reserved_space3();
1232   }
1233 };
1234 
1235 void TestReservedSpace_test() {
1236   TestReservedSpace::test_reserved_space();
1237 }
1238 
1239 #define assert_equals(actual, expected)  \
1240   assert(actual == expected,             \
1241          "Got " SIZE_FORMAT " expected " \
1242          SIZE_FORMAT, actual, expected);
1243 
1244 #define assert_ge(value1, value2)                  \
1245   assert(value1 >= value2,                         \
1246          "'" #value1 "': " SIZE_FORMAT " '"        \
1247          #value2 "': " SIZE_FORMAT, value1, value2);
1248 
1249 #define assert_lt(value1, value2)                  \
1250   assert(value1 < value2,                          \
1251          "'" #value1 "': " SIZE_FORMAT " '"        \
1252          #value2 "': " SIZE_FORMAT, value1, value2);
1253 
1254 
1255 class TestVirtualSpace : AllStatic {
1256   enum TestLargePages {
1257     Default,
1258     Disable,
1259     Reserve,
1260     Commit
1261   };
1262 
1263   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1264     switch(mode) {
1265     default:
1266     case Default:
1267     case Reserve:
1268       return ReservedSpace(reserve_size_aligned);
1269     case Disable:
1270     case Commit:
1271       return ReservedSpace(reserve_size_aligned,
1272                            os::vm_allocation_granularity(),
1273                            /* large */ false, /* exec */ false);
1274     }
1275   }
1276 
1277   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1278     switch(mode) {
1279     default:
1280     case Default:
1281     case Reserve:
1282       return vs.initialize(rs, 0);
1283     case Disable:
1284       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1285     case Commit:
1286       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1287     }
1288   }
1289 
1290  public:
1291   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1292                                                         TestLargePages mode = Default) {
1293     size_t granularity = os::vm_allocation_granularity();
1294     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1295 
1296     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1297 
1298     assert(reserved.is_reserved(), "Must be");
1299 
1300     VirtualSpace vs;
1301     bool initialized = initialize_virtual_space(vs, reserved, mode);
1302     assert(initialized, "Failed to initialize VirtualSpace");
1303 
1304     vs.expand_by(commit_size, false);
1305 
1306     if (vs.special()) {
1307       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1308     } else {
1309       assert_ge(vs.actual_committed_size(), commit_size);
1310       // Approximate the commit granularity.
1311       // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1313       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1314                                    os::vm_page_size() : os::large_page_size();
1315       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1316     }
1317 
1318     reserved.release();
1319   }
1320 
1321   static void test_virtual_space_actual_committed_space_one_large_page() {
1322     if (!UseLargePages) {
1323       return;
1324     }
1325 
1326     size_t large_page_size = os::large_page_size();
1327 
1328     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1329 
1330     assert(reserved.is_reserved(), "Must be");
1331 
1332     VirtualSpace vs;
1333     bool initialized = vs.initialize(reserved, 0);
1334     assert(initialized, "Failed to initialize VirtualSpace");
1335 
1336     vs.expand_by(large_page_size, false);
1337 
1338     assert_equals(vs.actual_committed_size(), large_page_size);
1339 
1340     reserved.release();
1341   }
1342 
1343   static void test_virtual_space_actual_committed_space() {
1344     test_virtual_space_actual_committed_space(4 * K, 0);
1345     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1346     test_virtual_space_actual_committed_space(8 * K, 0);
1347     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1348     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1349     test_virtual_space_actual_committed_space(12 * K, 0);
1350     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1351     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1352     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1353     test_virtual_space_actual_committed_space(64 * K, 0);
1354     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1355     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1356     test_virtual_space_actual_committed_space(2 * M, 0);
1357     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1358     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1359     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1360     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1361     test_virtual_space_actual_committed_space(10 * M, 0);
1362     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1363     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1364     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1365     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1366     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1367     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1368   }
1369 
1370   static void test_virtual_space_disable_large_pages() {
1371     if (!UseLargePages) {
1372       return;
1373     }
    // These test cases verify that committing is done with small pages when we force VirtualSpace to disable large pages.
1375     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1376     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1377     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1378     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1379     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1380     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1381     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1382 
1383     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1384     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1385     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1386     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1387     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1388     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1389     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1390 
1391     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1392     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1393     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1394     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1395     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1396     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1397     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1398   }
1399 
1400   static void test_virtual_space() {
1401     test_virtual_space_actual_committed_space();
1402     test_virtual_space_actual_committed_space_one_large_page();
1403     test_virtual_space_disable_large_pages();
1404   }
1405 };
1406 
1407 void TestVirtualSpace_test() {
1408   TestVirtualSpace::test_virtual_space();
1409 }
1410 
1411 #endif // PRODUCT
1412 
1413 #endif