1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/compressedOops.hpp"
  30 #include "oops/markWord.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/os.inline.hpp"
  33 #include "services/memTracker.hpp"
  34 #include "utilities/align.hpp"
  35 #include "utilities/powerOfTwo.hpp"
  36 
  37 // ReservedSpace
  38 
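     // Illustrative usage sketch (not part of the VM startup path; sizes are
     // arbitrary example values): a caller typically reserves a range once and
     // commits pieces of it later through a VirtualSpace, e.g.
     //
     //   ReservedSpace rs(64 * M);     // reserve address space only
     //   VirtualSpace vs;
     //   vs.initialize(rs, 16 * M);    // commit the first 16 MB
     //   ...
     //   vs.release();                 // forget the committed state
     //   rs.release();                 // return the mapping to the OS
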
  39 // Dummy constructor
  40 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  41     _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
  42 }
  43 
  44 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1) {
  66   initialize(size, alignment, large, requested_address, false);
  67 }
  68 
  69 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  70                              bool large,
  71                              bool executable) : _fd_for_heap(-1) {
  72   initialize(size, alignment, large, NULL, executable);
  73 }
  74 
  75 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  76                              bool special, bool executable) : _fd_for_heap(-1) {
  77   assert((size % os::vm_allocation_granularity()) == 0,
  78          "size not allocation aligned");
  79   _base = base;
  80   _size = size;
  81   _alignment = alignment;
  82   _noaccess_prefix = 0;
  83   _special = special;
  84   _executable = executable;
  85 }
  86 
  87 // Helper method
  88 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  89   if (is_file_mapped) {
  90     if (!os::unmap_memory(base, size)) {
  91       fatal("os::unmap_memory failed");
  92     }
  93   } else if (!os::release_memory(base, size)) {
  94     fatal("os::release_memory failed");
  95   }
  96 }
  97 
  98 // Helper method.
  99 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
 100                                            const size_t size, bool special, bool is_file_mapped = false)
 101 {
 102   if (base == requested_address || requested_address == NULL)
 103     return false; // did not fail
 104 
 105   if (base != NULL) {
 106     // A different reserve address may be acceptable in other cases,
 107     // but for compressed oops the heap should be at the requested address.
 108     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 109     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 110     // OS ignored requested address. Try different address.
 111     if (special) {
 112       if (!os::release_memory_special(base, size)) {
 113         fatal("os::release_memory_special failed");
 114       }
 115     } else {
 116       unmap_or_release_memory(base, size, is_file_mapped);
 117     }
 118   }
 119   return true;
 120 }
 121 
 122 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 123                                char* requested_address,
 124                                bool executable) {
 125   const size_t granularity = os::vm_allocation_granularity();
 126   assert((size & (granularity - 1)) == 0,
 127          "size not aligned to os::vm_allocation_granularity()");
 128   assert((alignment & (granularity - 1)) == 0,
 129          "alignment not aligned to os::vm_allocation_granularity()");
 130   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 131          "not a power of 2");
 132 
 133   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 134 
 135   _base = NULL;
 136   _size = 0;
 137   _special = false;
 138   _executable = executable;
 139   _alignment = 0;
 140   _noaccess_prefix = 0;
 141   if (size == 0) {
 142     return;
 143   }
 144 
 145   // If the OS doesn't support demand paging for large page memory, we need
 146   // to use reserve_memory_special() to reserve and pin the entire region.
 147   // If there is a backing file directory for this space then whether
 148   // large pages are allocated is up to the filesystem of the backing file.
 149   // So we ignore the UseLargePages flag in this case.
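       // ('special' here means that the whole range is committed and pinned up
       // front; such a range is later released with os::release_memory_special()
       // rather than os::release_memory(), see ReservedSpace::release().)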
 150   bool special = large && !os::can_commit_large_page_memory();
 151   if (special && _fd_for_heap != -1) {
 152     special = false;
 153     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 154       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 155       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 156     }
 157   }
 158 
 159   char* base = NULL;
 160 
 161   if (special) {
 162 
 163     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 164 
 165     if (base != NULL) {
 166       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 167         // OS ignored requested address. Try different address.
 168         return;
 169       }
 170       // Check alignment constraints.
 171       assert((uintptr_t) base % alignment == 0,
 172              "Large pages returned a non-aligned address, base: "
 173              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 174              p2i(base), alignment);
 175       _special = true;
 176     } else {
 177       // failed; try to reserve regular memory below
 178       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 179                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 180         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 181       }
 182     }
 183   }
 184 
 185   if (base == NULL) {
 186     // Optimistically assume that the OS returns an aligned base pointer.
 187     // When reserving a large address range, most OSes seem to align to at
 188     // least 64K.
 189 
 190     // If the memory was requested at a particular address, use
 191     // os::attempt_reserve_memory_at() to avoid mapping over something
 192     // important.  If available space is not detected, return NULL.
 193 
 194     if (requested_address != 0) {
 195       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 196       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 197         // OS ignored requested address. Try different address.
 198         base = NULL;
 199       }
 200     } else {
 201       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 202     }
 203 
 204     if (base == NULL) return;
 205 
 206     // Check alignment constraints
 207     if ((((size_t)base) & (alignment - 1)) != 0) {
 208       // Base not aligned, retry
 209       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 210 
 211       // Make sure that size is aligned
 212       size = align_up(size, alignment);
 213       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 214 
 215       if (requested_address != 0 &&
 216           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 217         // As a result of the alignment constraints, the allocated base differs
 218         // from the requested address. Return back to the caller who can
 219         // take remedial action (like try again without a requested address).
 220         assert(_base == NULL, "should be");
 221         return;
 222       }
 223     }
 224   }
 225   // Done
 226   _base = base;
 227   _size = size;
 228   _alignment = alignment;
 229   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 230   if (_fd_for_heap != -1) {
 231     _special = true;
 232   }
 233 }
 234 
 235 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 236                                         bool split, bool realloc) {
 237   assert(partition_size <= size(), "partition failed");
 238   if (split) {
 239     os::split_reserved_memory(base(), size(), partition_size, realloc);
 240   }
 241   ReservedSpace result(base(), partition_size, alignment, special(),
 242                        executable());
 243   return result;
 244 }
 245 
 246 
 247 ReservedSpace
 248 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 249   assert(partition_size <= size(), "partition failed");
 250   ReservedSpace result(base() + partition_size, size() - partition_size,
 251                        alignment, special(), executable());
 252   return result;
 253 }
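
     // Illustrative sketch (hypothetical names and sizes): a reservation can be
     // split into two adjacent ReservedSpaces, e.g. for a two-part layout:
     //
     //   ReservedSpace young = rs.first_part(young_size, rs.alignment(), false, false);
     //   ReservedSpace old   = rs.last_part(young_size, rs.alignment());
     //
     // Passing split == true to first_part() additionally asks the OS layer to
     // split the underlying reservation via os::split_reserved_memory().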
 254 
 255 
 256 size_t ReservedSpace::page_align_size_up(size_t size) {
 257   return align_up(size, os::vm_page_size());
 258 }
 259 
 260 
 261 size_t ReservedSpace::page_align_size_down(size_t size) {
 262   return align_down(size, os::vm_page_size());
 263 }
 264 
 265 
 266 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 267   return align_up(size, os::vm_allocation_granularity());
 268 }
 269 
 270 
 271 void ReservedSpace::release() {
 272   if (is_reserved()) {
 273     char *real_base = _base - _noaccess_prefix;
 274     const size_t real_size = _size + _noaccess_prefix;
 275     if (special()) {
 276       if (_fd_for_heap != -1) {
 277         os::unmap_memory(real_base, real_size);
 278       } else {
 279         os::release_memory_special(real_base, real_size);
 280       }
 281     } else {
 282       os::release_memory(real_base, real_size);
 283     }
 284     _base = NULL;
 285     _size = 0;
 286     _noaccess_prefix = 0;
 287     _alignment = 0;
 288     _special = false;
 289     _executable = false;
 290   }
 291 }
 292 
 293 static size_t noaccess_prefix_size(size_t alignment) {
 294   return lcm(os::vm_page_size(), alignment);
 295 }
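     // (The prefix must be a multiple of the page size so it can be protected,
     // and a multiple of the heap alignment so _base stays properly aligned
     // after the prefix is added to it, hence the lcm.)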
 296 
 297 void ReservedHeapSpace::establish_noaccess_prefix() {
 298   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 299   _noaccess_prefix = noaccess_prefix_size(_alignment);
 300 
 301   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 302     if (true
 303         WIN64_ONLY(&& !UseLargePages)
 304         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 305       // Protect memory at the base of the allocated region.
 306       // If special, the page was committed (only matters on Windows)
 307       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 308         fatal("cannot protect protection page");
 309       }
 310       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 311                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 312                                  p2i(_base),
 313                                  _noaccess_prefix);
 314       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
 315     } else {
 316       CompressedOops::set_use_implicit_null_checks(false);
 317     }
 318   }
 319 
 320   _base += _noaccess_prefix;
 321   _size -= _noaccess_prefix;
 322   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 323 }
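
     // Resulting layout (sketch):
     //
     //   reserved range:  [ noaccess prefix | usable heap ..................... ]
     //                    ^ real base        ^ _base (= real base + _noaccess_prefix)
     //
     // The protected prefix sits at the compressed-oop base, so decoding a NULL
     // narrow oop yields an address in the protected page and faults, which is
     // what enables implicit null checks.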
 324 
 325 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 326 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 327 // might still fulfill the wishes of the caller.
 328 // Assures the memory is aligned to 'alignment'.
 329 // NOTE: If ReservedHeapSpace already points to some reserved memory, it is freed first.
 330 void ReservedHeapSpace::try_reserve_heap(size_t size,
 331                                          size_t alignment,
 332                                          bool large,
 333                                          char* requested_address) {
 334   if (_base != NULL) {
 335     // We tried before, but we didn't like the address delivered.
 336     release();
 337   }
 338 
 339   // If the OS doesn't support demand paging for large page memory, we need
 340   // to use reserve_memory_special() to reserve and pin the entire region.
 341   // If there is a backing file directory for this space then whether
 342   // large pages are allocated is up to the filesystem of the backing file.
 343   // So we ignore the UseLargePages flag in this case.
 344   bool special = large && !os::can_commit_large_page_memory();
 345   if (special && _fd_for_heap != -1) {
 346     special = false;
 347     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 348                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 349       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 350     }
 351   }
 352   char* base = NULL;
 353 
 354   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 355                              " heap of size " SIZE_FORMAT_HEX,
 356                              p2i(requested_address),
 357                              size);
 358 
 359   if (special) {
 360     base = os::reserve_memory_special(size, alignment, requested_address, false);
 361 
 362     if (base != NULL) {
 363       // Check alignment constraints.
 364       assert((uintptr_t) base % alignment == 0,
 365              "Large pages returned a non-aligned address, base: "
 366              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 367              p2i(base), alignment);
 368       _special = true;
 369     }
 370   }
 371 
 372   if (base == NULL) {
 373     // Failed; try to reserve regular memory below
 374     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 375                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 376       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 377     }
 378 
 379     // Optimistically assume that the OS returns an aligned base pointer.
 380     // When reserving a large address range, most OSes seem to align to at
 381     // least 64K.
 382 
 383     // If the memory was requested at a particular address, use
 384     // os::attempt_reserve_memory_at() to avoid mapping over something
 385     // important.  If available space is not detected, return NULL.
 386 
 387     if (requested_address != 0) {
 388       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 389     } else {
 390       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 391     }
 392   }
 393   if (base == NULL) { return; }
 394 
 395   // Done
 396   _base = base;
 397   _size = size;
 398   _alignment = alignment;
 399 
 400   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 401   if (_fd_for_heap != -1) {
 402     _special = true;
 403   }
 404 
 405   // Check alignment constraints
 406   if ((((size_t)base) & (alignment - 1)) != 0) {
 407     // Base not aligned, retry.
 408     release();
 409   }
 410 }
 411 
 412 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 413                                           char *lowest_start,
 414                                           size_t attach_point_alignment,
 415                                           char *aligned_heap_base_min_address,
 416                                           char *upper_bound,
 417                                           size_t size,
 418                                           size_t alignment,
 419                                           bool large) {
 420   const size_t attach_range = highest_start - lowest_start;
 421   // Cap num_attempts at the number of possible attach points.
 422   // At least one attempt is possible even for a zero-sized attach range.
 423   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 424   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 425 
 426   const size_t stepsize = (attach_range == 0) ? // Only one try.
 427     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 428 
 429   // Try attach points from top to bottom.
 430   char* attach_point = highest_start;
 431   while (attach_point >= lowest_start  &&
 432          attach_point <= highest_start &&  // Avoid wrap around.
 433          ((_base == NULL) ||
 434           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 435     try_reserve_heap(size, alignment, large, attach_point);
 436     attach_point -= stepsize;
 437   }
 438 }
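
     // Example with illustrative numbers: for an attach range of 1 GB, an attach
     // point alignment of 64 KB and HeapSearchSteps == 3, stepsize comes out at
     // roughly 341 MB (rounded up to 64 KB), so up to three attach points are
     // probed, starting at highest_start and walking downwards.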
 439 
 440 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 441 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 442 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 443 
 444 // Helper for heap allocation. Returns a NULL-terminated array of addresses
 445 // (OS-specific) that are suitable for disjoint base mode.
 446 //
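     // In disjoint base mode the heap base is a multiple of OopEncodingHeapMax
     // (32 GB with the default object alignment and oop shift), so the base bits
     // and the shifted narrow-oop bits do not overlap and the decode can combine
     // them without a carry into the base bits (some platforms exploit this).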
 447 static char** get_attach_addresses_for_disjoint_mode() {
 448   static uint64_t addresses[] = {
 449      2 * SIZE_32G,
 450      3 * SIZE_32G,
 451      4 * SIZE_32G,
 452      8 * SIZE_32G,
 453     10 * SIZE_32G,
 454      1 * SIZE_64K * SIZE_32G,
 455      2 * SIZE_64K * SIZE_32G,
 456      3 * SIZE_64K * SIZE_32G,
 457      4 * SIZE_64K * SIZE_32G,
 458     16 * SIZE_64K * SIZE_32G,
 459     32 * SIZE_64K * SIZE_32G,
 460     34 * SIZE_64K * SIZE_32G,
 461     0
 462   };
 463 
 464   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress.  This
 465   // assumes the array is sorted.
 466   uint i = 0;
 467   while (addresses[i] != 0 &&
 468          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 469     i++;
 470   }
 471   uint start = i;
 472 
 473   // Avoid more steps than requested.
 474   i = 0;
 475   while (addresses[start+i] != 0) {
 476     if (i == HeapSearchSteps) {
 477       addresses[start+i] = 0;
 478       break;
 479     }
 480     i++;
 481   }
 482 
 483   return (char**) &addresses[start];
 484 }
 485 
 486 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 487   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 488             "can not allocate compressed oop heap for this size");
 489   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 490 
 491   const size_t granularity = os::vm_allocation_granularity();
 492   assert((size & (granularity - 1)) == 0,
 493          "size not aligned to os::vm_allocation_granularity()");
 494   assert((alignment & (granularity - 1)) == 0,
 495          "alignment not aligned to os::vm_allocation_granularity()");
 496   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 497          "not a power of 2");
 498 
 499   // The necessary attach point alignment for generated wish addresses.
 500   // This is needed to increase the chance of attaching for mmap and shmat.
 501   const size_t os_attach_point_alignment =
 502     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 503     NOT_AIX(os::vm_allocation_granularity());
 504   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 505 
 506   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 507   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 508     noaccess_prefix_size(alignment) : 0;
 509 
 510   // Attempt to alloc at user-given address.
 511   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 512     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 513     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 514       release();
 515     }
 516   }
 517 
 518   // Keep heap at HeapBaseMinAddress.
 519   if (_base == NULL) {
 520 
 521     // Try to allocate the heap at addresses that allow efficient oop compression.
 522     // Different schemes are tried, in order of decreasing optimization potential.
 523     //
 524     // For this, try_reserve_heap() is called with the desired heap base addresses.
 525     // A call into the os layer to allocate at a given address can return memory
 526     // at a different address than requested.  Still, this might be memory at a useful
 527     // address. try_reserve_heap() always returns this allocated memory, since the
 528     // criteria for a good heap are only checked here.
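         //
         // The schemes, in decreasing order of optimization potential, are
         // (limits assume the default object alignment and oop shift):
         //   1. unscaled:      heap end <= 4 GB; the narrow oop is the address itself.
         //   2. zero-based:    heap end <= 32 GB; decoding is a shift, with no base.
         //   3. disjoint base: heap base is a multiple of 32 GB
         //                     (see get_attach_addresses_for_disjoint_mode()).
         //   4. heap-based:    arbitrary base plus a noaccess prefix for implicit
         //                     null checks.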
 529 
 530     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 531     // Give it several tries from top of range to bottom.
 532     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 533 
 534       // Calculate the address range within which we try to attach (range of possible start addresses).
 535       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 536       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 537       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 538                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 539     }
 540 
 541     // zerobased: Attempt to allocate in the lower 32G.
 542     // But leave room for the compressed class space, which is allocated above
 543     // the heap.
 544     char *zerobased_max = (char *)OopEncodingHeapMax;
 545     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 546     // For small heaps, save some space for compressed class pointer
 547     // space so it can be decoded with no base.
 548     if (UseCompressedClassPointers && !UseSharedSpaces &&
 549         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 550         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 551       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 552     }
 553 
 554     // Give it several tries from top of range to bottom.
 555     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zero-based theoretically possible.
 556         ((_base == NULL) ||                        // No previous try succeeded.
 557          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 558 
 559       // Calculate the address range within which we try to attach (range of possible start addresses).
 560       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 561       // Need to be careful about size being guaranteed to be less
 562       // than UnscaledOopHeapMax due to type constraints.
 563       char *lowest_start = aligned_heap_base_min_address;
 564       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 565       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 566         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 567       }
 568       lowest_start = align_up(lowest_start, attach_point_alignment);
 569       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 570                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 571     }
 572 
 573     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 574     // implement null checks.
 575     noaccess_prefix = noaccess_prefix_size(alignment);
 576 
 577     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 578     char** addresses = get_attach_addresses_for_disjoint_mode();
 579     int i = 0;
 580     while (addresses[i] &&                                 // End of array not yet reached.
 581            ((_base == NULL) ||                             // No previous try succeeded.
 582             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 583              !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 584       char* const attach_point = addresses[i];
 585       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 586       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 587       i++;
 588     }
 589 
 590     // Last, desperate try without any placement.
 591     if (_base == NULL) {
 592       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 593       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 594     }
 595   }
 596 }
 597 
 598 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 599 
 600   if (size == 0) {
 601     return;
 602   }
 603 
 604   if (heap_allocation_directory != NULL) {
 605     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 606     if (_fd_for_heap == -1) {
 607       vm_exit_during_initialization(
 608         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 609     }
 610   }
 611 
 612   // Heap size should be aligned to alignment, too.
 613   guarantee(is_aligned(size, alignment), "set by caller");
 614 
 615   if (UseCompressedOops) {
 616     initialize_compressed_heap(size, alignment, large);
 617     if (_size > size) {
 618       // We allocated heap with noaccess prefix.
 619       // It can happen that we get a zero-based/unscaled heap with a noaccess prefix
 620       // if we had to try at an arbitrary address.
 621       establish_noaccess_prefix();
 622     }
 623   } else {
 624     initialize(size, alignment, large, NULL, false);
 625   }
 626 
 627   assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
 628          "area must be distinguishable from marks for mark-sweep");
 629   assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
 630          "area must be distinguishable from marks for mark-sweep");
 631 
 632   if (base() != NULL) {
 633     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 634   }
 635 
 636   if (_fd_for_heap != -1) {
 637     os::close(_fd_for_heap);
 638   }
 639 }
 640 
 641 MemRegion ReservedHeapSpace::region() const {
 642   return MemRegion((HeapWord*)base(), (HeapWord*)end());
 643 }
 644 
 645 // Reserve space for the code segment.  Same as the Java heap, only we mark
 646 // this as executable.
 647 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 648                                      size_t rs_align,
 649                                      bool large) :
 650   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 651   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 652 }
 653 
 654 // VirtualSpace
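     //
     // A VirtualSpace tracks the committed portion of a ReservedSpace.  The
     // reserved range is viewed as three consecutive regions -- lower, middle and
     // upper -- so that the middle region can be committed with a larger
     // granularity (e.g. large pages) while the unaligned ends fall back to the
     // default page size (see initialize_with_granularity() below).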
 655 
 656 VirtualSpace::VirtualSpace() {
 657   _low_boundary           = NULL;
 658   _high_boundary          = NULL;
 659   _low                    = NULL;
 660   _high                   = NULL;
 661   _lower_high             = NULL;
 662   _middle_high            = NULL;
 663   _upper_high             = NULL;
 664   _lower_high_boundary    = NULL;
 665   _middle_high_boundary   = NULL;
 666   _upper_high_boundary    = NULL;
 667   _lower_alignment        = 0;
 668   _middle_alignment       = 0;
 669   _upper_alignment        = 0;
 670   _special                = false;
 671   _executable             = false;
 672 }
 673 
 674 
 675 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 676   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 677   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 678 }
 679 
 680 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 681   if (!rs.is_reserved()) return false;  // allocation failed.
 682   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 683   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 684 
 685   _low_boundary  = rs.base();
 686   _high_boundary = low_boundary() + rs.size();
 687 
 688   _low = low_boundary();
 689   _high = low();
 690 
 691   _special = rs.special();
 692   _executable = rs.executable();
 693 
 694   // When a VirtualSpace begins life at a large size, make all future expansion
 695   // and shrinking occur aligned to a granularity of large pages.  This avoids
 696   // fragmentation of physical addresses that inhibits the use of large pages
 697   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 698   // page size, the only spaces that get handled this way are codecache and
 699   // the heap itself, both of which provide a substantial performance
 700   // boost in many benchmarks when covered by large pages.
 701   //
 702   // No attempt is made to force large page alignment at the very top and
 703   // bottom of the space if they are not aligned so already.
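       //
       // Resulting region layout (sketch; interior boundaries are rounded to
       // max_commit_granularity):
       //
       //   low_boundary                                            high_boundary
       //   |  lower region |          middle region          |  upper region  |
       //                   ^ lower_high_boundary             ^ middle_high_boundary
       //
       // The lower and upper regions are committed with the small page size, the
       // middle region with max_commit_granularity.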
 704   _lower_alignment  = os::vm_page_size();
 705   _middle_alignment = max_commit_granularity;
 706   _upper_alignment  = os::vm_page_size();
 707 
 708   // End of each region
 709   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 710   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 711   _upper_high_boundary = high_boundary();
 712 
 713   // High address of each region
 714   _lower_high = low_boundary();
 715   _middle_high = lower_high_boundary();
 716   _upper_high = middle_high_boundary();
 717 
 718   // commit to initial size
 719   if (committed_size > 0) {
 720     if (!expand_by(committed_size)) {
 721       return false;
 722     }
 723   }
 724   return true;
 725 }
 726 
 727 
 728 VirtualSpace::~VirtualSpace() {
 729   release();
 730 }
 731 
 732 
 733 void VirtualSpace::release() {
 734   // This does not release memory it reserved.
 735   // Caller must release via rs.release();
 736   _low_boundary           = NULL;
 737   _high_boundary          = NULL;
 738   _low                    = NULL;
 739   _high                   = NULL;
 740   _lower_high             = NULL;
 741   _middle_high            = NULL;
 742   _upper_high             = NULL;
 743   _lower_high_boundary    = NULL;
 744   _middle_high_boundary   = NULL;
 745   _upper_high_boundary    = NULL;
 746   _lower_alignment        = 0;
 747   _middle_alignment       = 0;
 748   _upper_alignment        = 0;
 749   _special                = false;
 750   _executable             = false;
 751 }
 752 
 753 
 754 size_t VirtualSpace::committed_size() const {
 755   return pointer_delta(high(), low(), sizeof(char));
 756 }
 757 
 758 
 759 size_t VirtualSpace::reserved_size() const {
 760   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 761 }
 762 
 763 
 764 size_t VirtualSpace::uncommitted_size()  const {
 765   return reserved_size() - committed_size();
 766 }
 767 
 768 size_t VirtualSpace::actual_committed_size() const {
 769   // Special VirtualSpaces commit all reserved space up front.
 770   if (special()) {
 771     return reserved_size();
 772   }
 773 
 774   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 775   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 776   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 777 
 778 #ifdef ASSERT
 779   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 780   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 781   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 782 
 783   if (committed_high > 0) {
 784     assert(committed_low == lower, "Must be");
 785     assert(committed_middle == middle, "Must be");
 786   }
 787 
 788   if (committed_middle > 0) {
 789     assert(committed_low == lower, "Must be");
 790   }
 791   if (committed_middle < middle) {
 792     assert(committed_high == 0, "Must be");
 793   }
 794 
 795   if (committed_low < lower) {
 796     assert(committed_high == 0, "Must be");
 797     assert(committed_middle == 0, "Must be");
 798   }
 799 #endif
 800 
 801   return committed_low + committed_middle + committed_high;
 802 }
 803 
 804 
 805 bool VirtualSpace::contains(const void* p) const {
 806   return low() <= (const char*) p && (const char*) p < high();
 807 }
 808 
 809 static void pretouch_expanded_memory(void* start, void* end) {
 810   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 811   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 812 
 813   os::pretouch_memory(start, end);
 814 }
 815 
 816 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 817   if (os::commit_memory(start, size, alignment, executable)) {
 818     if (pre_touch || AlwaysPreTouch) {
 819       pretouch_expanded_memory(start, start + size);
 820     }
 821     return true;
 822   }
 823 
 824   debug_only(warning(
 825       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 826       " size=" SIZE_FORMAT ", executable=%d) failed",
 827       p2i(start), p2i(start + size), size, executable);)
 828 
 829   return false;
 830 }
 831 
 832 /*
 833    First we need to determine if a particular virtual space is using large
 834    pages.  This is done in the initialize function and only virtual spaces
 835    that are larger than LargePageSizeInBytes use large pages.  Once we
 836    have determined this, all expand_by and shrink_by calls must grow and
 837    shrink by large page size chunks.  If a particular request
 838    is within the current large page, the call to commit and uncommit memory
 839    can be ignored.  In the case that the low and high boundaries of this
 840    space are not large page aligned, the pages leading to the first large
 841    page address and the pages after the last large page address must be
 842    allocated with default pages.
 843 */
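     //
     // For example (illustrative numbers): if the middle region uses 2 MB pages
     // and high() lies inside it, an expand_by(4*K) that moves high() past the
     // already committed middle_high() commits a whole new 2 MB chunk, because
     // aligned_middle_new_high is rounded up to middle_alignment(); high() itself
     // still only advances by 4 KB.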
 844 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 845   if (uncommitted_size() < bytes) {
 846     return false;
 847   }
 848 
 849   if (special()) {
 850     // don't commit memory if the entire space is pinned in memory
 851     _high += bytes;
 852     return true;
 853   }
 854 
 855   char* previous_high = high();
 856   char* unaligned_new_high = high() + bytes;
 857   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 858 
 859   // Calculate where the new high for each of the regions should be.  If
 860   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 861   // then the unaligned lower and upper new highs would be the
 862   // lower_high() and upper_high() respectively.
 863   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 864   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 865   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 866 
 867   // Align the new highs based on each region's alignment.  Lower and upper
 868   // alignment will always be default page size.  middle alignment will be
 869   // LargePageSizeInBytes if the actual size of the virtual space is in
 870   // fact larger than LargePageSizeInBytes.
 871   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 872   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 873   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 874 
 875   // Determine which regions need to grow in this expand_by call.
 876   // If you are growing in the lower region, high() must be in that
 877   // region so calculate the size based on high().  For the middle and
 878   // upper regions, determine the starting point of growth based on the
 879   // location of high().  By getting the MAX of the region's low address
 880   // (or the previous region's high address) and high(), we can tell whether
 881   // the growth is intra- or inter-region.
 882   size_t lower_needs = 0;
 883   if (aligned_lower_new_high > lower_high()) {
 884     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 885   }
 886   size_t middle_needs = 0;
 887   if (aligned_middle_new_high > middle_high()) {
 888     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 889   }
 890   size_t upper_needs = 0;
 891   if (aligned_upper_new_high > upper_high()) {
 892     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 893   }
 894 
 895   // Check contiguity.
 896   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 897          "high address must be contained within the region");
 898   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 899          "high address must be contained within the region");
 900   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 901          "high address must be contained within the region");
 902 
 903   // Commit regions
 904   if (lower_needs > 0) {
 905     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 906     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 907       return false;
 908     }
 909     _lower_high += lower_needs;
 910   }
 911 
 912   if (middle_needs > 0) {
 913     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 914     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 915       return false;
 916     }
 917     _middle_high += middle_needs;
 918   }
 919 
 920   if (upper_needs > 0) {
 921     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 922     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 923       return false;
 924     }
 925     _upper_high += upper_needs;
 926   }
 927 
 928   _high += bytes;
 929   return true;
 930 }
 931 
 932 // A page is uncommitted if the contents of the entire page are deemed unusable.
 933 // Continue to decrement the high() pointer until it reaches a page boundary
 934 // in which case that particular page can now be uncommitted.
 935 void VirtualSpace::shrink_by(size_t size) {
 936   if (committed_size() < size)
 937     fatal("Cannot shrink virtual space to negative size");
 938 
 939   if (special()) {
 940     // don't uncommit if the entire space is pinned in memory
 941     _high -= size;
 942     return;
 943   }
 944 
 945   char* unaligned_new_high = high() - size;
 946   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 947 
 948   // Calculate new unaligned address
 949   char* unaligned_upper_new_high =
 950     MAX2(unaligned_new_high, middle_high_boundary());
 951   char* unaligned_middle_new_high =
 952     MAX2(unaligned_new_high, lower_high_boundary());
 953   char* unaligned_lower_new_high =
 954     MAX2(unaligned_new_high, low_boundary());
 955 
 956   // Align address to region's alignment
 957   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 958   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 959   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 960 
 961   // Determine which regions need to shrink
 962   size_t upper_needs = 0;
 963   if (aligned_upper_new_high < upper_high()) {
 964     upper_needs =
 965       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 966   }
 967   size_t middle_needs = 0;
 968   if (aligned_middle_new_high < middle_high()) {
 969     middle_needs =
 970       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 971   }
 972   size_t lower_needs = 0;
 973   if (aligned_lower_new_high < lower_high()) {
 974     lower_needs =
 975       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 976   }
 977 
 978   // Check contiguity.
 979   assert(middle_high_boundary() <= upper_high() &&
 980          upper_high() <= upper_high_boundary(),
 981          "high address must be contained within the region");
 982   assert(lower_high_boundary() <= middle_high() &&
 983          middle_high() <= middle_high_boundary(),
 984          "high address must be contained within the region");
 985   assert(low_boundary() <= lower_high() &&
 986          lower_high() <= lower_high_boundary(),
 987          "high address must be contained within the region");
 988 
 989   // Uncommit
 990   if (upper_needs > 0) {
 991     assert(middle_high_boundary() <= aligned_upper_new_high &&
 992            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 993            "must not shrink beyond region");
 994     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 995       debug_only(warning("os::uncommit_memory failed"));
 996       return;
 997     } else {
 998       _upper_high -= upper_needs;
 999     }
1000   }
1001   if (middle_needs > 0) {
1002     assert(lower_high_boundary() <= aligned_middle_new_high &&
1003            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1004            "must not shrink beyond region");
1005     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1006       debug_only(warning("os::uncommit_memory failed"));
1007       return;
1008     } else {
1009       _middle_high -= middle_needs;
1010     }
1011   }
1012   if (lower_needs > 0) {
1013     assert(low_boundary() <= aligned_lower_new_high &&
1014            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1015            "must not shrink beyond region");
1016     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1017       debug_only(warning("os::uncommit_memory failed"));
1018       return;
1019     } else {
1020       _lower_high -= lower_needs;
1021     }
1022   }
1023 
1024   _high -= size;
1025 }
1026 
1027 #ifndef PRODUCT
1028 void VirtualSpace::check_for_contiguity() {
1029   // Check contiguity.
1030   assert(low_boundary() <= lower_high() &&
1031          lower_high() <= lower_high_boundary(),
1032          "high address must be contained within the region");
1033   assert(lower_high_boundary() <= middle_high() &&
1034          middle_high() <= middle_high_boundary(),
1035          "high address must be contained within the region");
1036   assert(middle_high_boundary() <= upper_high() &&
1037          upper_high() <= upper_high_boundary(),
1038          "high address must be contained within the region");
1039   assert(low() >= low_boundary(), "low");
1040   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1041   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1042   assert(high() <= upper_high(), "upper high");
1043 }
1044 
1045 void VirtualSpace::print_on(outputStream* out) {
1046   out->print   ("Virtual space:");
1047   if (special()) out->print(" (pinned in memory)");
1048   out->cr();
1049   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1050   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1051   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1052   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1053 }
1054 
1055 void VirtualSpace::print() {
1056   print_on(tty);
1057 }
1058 
1059 /////////////// Unit tests ///////////////
1060 
1061 #ifndef PRODUCT
1062 
1063 class TestReservedSpace : AllStatic {
1064  public:
1065   static void small_page_write(void* addr, size_t size) {
1066     size_t page_size = os::vm_page_size();
1067 
1068     char* end = (char*)addr + size;
1069     for (char* p = (char*)addr; p < end; p += page_size) {
1070       *p = 1;
1071     }
1072   }
1073 
1074   static void release_memory_for_test(ReservedSpace rs) {
1075     if (rs.special()) {
1076       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1077     } else {
1078       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1079     }
1080   }
1081 
1082   static void test_reserved_space1(size_t size, size_t alignment) {
1083     assert(is_aligned(size, alignment), "Incorrect input parameters");
1084 
1085     ReservedSpace rs(size,          // size
1086                      alignment,     // alignment
1087                      UseLargePages, // large
1088                      (char *)NULL); // requested_address
1089 
1090     assert(rs.base() != NULL, "Must be");
1091     assert(rs.size() == size, "Must be");
1092 
1093     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1094     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1095 
1096     if (rs.special()) {
1097       small_page_write(rs.base(), size);
1098     }
1099 
1100     release_memory_for_test(rs);
1101   }
1102 
1103   static void test_reserved_space2(size_t size) {
1104     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1105 
1106     ReservedSpace rs(size);
1107 
1108     assert(rs.base() != NULL, "Must be");
1109     assert(rs.size() == size, "Must be");
1110 
1111     if (rs.special()) {
1112       small_page_write(rs.base(), size);
1113     }
1114 
1115     release_memory_for_test(rs);
1116   }
1117 
1118   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1119     if (size < alignment) {
1120       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1121       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1122       return;
1123     }
1124 
1125     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1126     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1127 
1128     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1129 
1130     ReservedSpace rs(size, alignment, large, false);
1131 
1132     assert(rs.base() != NULL, "Must be");
1133     assert(rs.size() == size, "Must be");
1134 
1135     if (rs.special()) {
1136       small_page_write(rs.base(), size);
1137     }
1138 
1139     release_memory_for_test(rs);
1140   }
1141 
1142 
1143   static void test_reserved_space1() {
1144     size_t size = 2 * 1024 * 1024;
1145     size_t ag   = os::vm_allocation_granularity();
1146 
1147     test_reserved_space1(size,      ag);
1148     test_reserved_space1(size * 2,  ag);
1149     test_reserved_space1(size * 10, ag);
1150   }
1151 
1152   static void test_reserved_space2() {
1153     size_t size = 2 * 1024 * 1024;
1154     size_t ag = os::vm_allocation_granularity();
1155 
1156     test_reserved_space2(size * 1);
1157     test_reserved_space2(size * 2);
1158     test_reserved_space2(size * 10);
1159     test_reserved_space2(ag);
1160     test_reserved_space2(size - ag);
1161     test_reserved_space2(size);
1162     test_reserved_space2(size + ag);
1163     test_reserved_space2(size * 2);
1164     test_reserved_space2(size * 2 - ag);
1165     test_reserved_space2(size * 2 + ag);
1166     test_reserved_space2(size * 3);
1167     test_reserved_space2(size * 3 - ag);
1168     test_reserved_space2(size * 3 + ag);
1169     test_reserved_space2(size * 10);
1170     test_reserved_space2(size * 10 + size / 2);
1171   }
1172 
1173   static void test_reserved_space3() {
1174     size_t ag = os::vm_allocation_granularity();
1175 
1176     test_reserved_space3(ag,      ag    , false);
1177     test_reserved_space3(ag * 2,  ag    , false);
1178     test_reserved_space3(ag * 3,  ag    , false);
1179     test_reserved_space3(ag * 2,  ag * 2, false);
1180     test_reserved_space3(ag * 4,  ag * 2, false);
1181     test_reserved_space3(ag * 8,  ag * 2, false);
1182     test_reserved_space3(ag * 4,  ag * 4, false);
1183     test_reserved_space3(ag * 8,  ag * 4, false);
1184     test_reserved_space3(ag * 16, ag * 4, false);
1185 
1186     if (UseLargePages) {
1187       size_t lp = os::large_page_size();
1188 
1189       // Without large pages
1190       test_reserved_space3(lp,     ag * 4, false);
1191       test_reserved_space3(lp * 2, ag * 4, false);
1192       test_reserved_space3(lp * 4, ag * 4, false);
1193       test_reserved_space3(lp,     lp    , false);
1194       test_reserved_space3(lp * 2, lp    , false);
1195       test_reserved_space3(lp * 3, lp    , false);
1196       test_reserved_space3(lp * 2, lp * 2, false);
1197       test_reserved_space3(lp * 4, lp * 2, false);
1198       test_reserved_space3(lp * 8, lp * 2, false);
1199 
1200       // With large pages
1201       test_reserved_space3(lp, ag * 4    , true);
1202       test_reserved_space3(lp * 2, ag * 4, true);
1203       test_reserved_space3(lp * 4, ag * 4, true);
1204       test_reserved_space3(lp, lp        , true);
1205       test_reserved_space3(lp * 2, lp    , true);
1206       test_reserved_space3(lp * 3, lp    , true);
1207       test_reserved_space3(lp * 2, lp * 2, true);
1208       test_reserved_space3(lp * 4, lp * 2, true);
1209       test_reserved_space3(lp * 8, lp * 2, true);
1210     }
1211   }
1212 
1213   static void test_reserved_space() {
1214     test_reserved_space1();
1215     test_reserved_space2();
1216     test_reserved_space3();
1217   }
1218 };
1219 
1220 void TestReservedSpace_test() {
1221   TestReservedSpace::test_reserved_space();
1222 }
1223 
1224 #define assert_equals(actual, expected)  \
1225   assert(actual == expected,             \
1226          "Got " SIZE_FORMAT " expected " \
1227          SIZE_FORMAT, actual, expected);
1228 
1229 #define assert_ge(value1, value2)                  \
1230   assert(value1 >= value2,                         \
1231          "'" #value1 "': " SIZE_FORMAT " '"        \
1232          #value2 "': " SIZE_FORMAT, value1, value2);
1233 
1234 #define assert_lt(value1, value2)                  \
1235   assert(value1 < value2,                          \
1236          "'" #value1 "': " SIZE_FORMAT " '"        \
1237          #value2 "': " SIZE_FORMAT, value1, value2);
1238 
1239 
1240 class TestVirtualSpace : AllStatic {
1241   enum TestLargePages {
1242     Default,
1243     Disable,
1244     Reserve,
1245     Commit
1246   };
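       // Default/Reserve: reservation may use large pages; default commit granularity.
       // Disable:         reservation without large pages; commit granularity forced
       //                  to the small page size.
       // Commit:          reservation without large pages; commit granularity derived
       //                  from the region size (may pick the large page size).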
1247 
1248   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1249     switch(mode) {
1250     default:
1251     case Default:
1252     case Reserve:
1253       return ReservedSpace(reserve_size_aligned);
1254     case Disable:
1255     case Commit:
1256       return ReservedSpace(reserve_size_aligned,
1257                            os::vm_allocation_granularity(),
1258                            /* large */ false, /* exec */ false);
1259     }
1260   }
1261 
1262   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1263     switch(mode) {
1264     default:
1265     case Default:
1266     case Reserve:
1267       return vs.initialize(rs, 0);
1268     case Disable:
1269       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1270     case Commit:
1271       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1272     }
1273   }
1274 
1275  public:
1276   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1277                                                         TestLargePages mode = Default) {
1278     size_t granularity = os::vm_allocation_granularity();
1279     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1280 
1281     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1282 
1283     assert(reserved.is_reserved(), "Must be");
1284 
1285     VirtualSpace vs;
1286     bool initialized = initialize_virtual_space(vs, reserved, mode);
1287     assert(initialized, "Failed to initialize VirtualSpace");
1288 
1289     vs.expand_by(commit_size, false);
1290 
1291     if (vs.special()) {
1292       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1293     } else {
1294       assert_ge(vs.actual_committed_size(), commit_size);
1295       // Approximate the commit granularity.
1296       // Make sure that we don't commit using large pages
1297       // if large pages have been disabled for this VirtualSpace.
1298       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1299                                    os::vm_page_size() : os::large_page_size();
1300       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1301     }
1302 
1303     reserved.release();
1304   }
1305 
1306   static void test_virtual_space_actual_committed_space_one_large_page() {
1307     if (!UseLargePages) {
1308       return;
1309     }
1310 
1311     size_t large_page_size = os::large_page_size();
1312 
1313     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1314 
1315     assert(reserved.is_reserved(), "Must be");
1316 
1317     VirtualSpace vs;
1318     bool initialized = vs.initialize(reserved, 0);
1319     assert(initialized, "Failed to initialize VirtualSpace");
1320 
1321     vs.expand_by(large_page_size, false);
1322 
1323     assert_equals(vs.actual_committed_size(), large_page_size);
1324 
1325     reserved.release();
1326   }
1327 
1328   static void test_virtual_space_actual_committed_space() {
1329     test_virtual_space_actual_committed_space(4 * K, 0);
1330     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1331     test_virtual_space_actual_committed_space(8 * K, 0);
1332     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1333     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1334     test_virtual_space_actual_committed_space(12 * K, 0);
1335     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1336     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1337     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1338     test_virtual_space_actual_committed_space(64 * K, 0);
1339     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1340     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1341     test_virtual_space_actual_committed_space(2 * M, 0);
1342     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1343     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1344     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1345     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1346     test_virtual_space_actual_committed_space(10 * M, 0);
1347     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1348     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1349     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1350     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1351     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1352     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1353   }
1354 
1355   static void test_virtual_space_disable_large_pages() {
1356     if (!UseLargePages) {
1357       return;
1358     }
1359     // These test cases verify the behavior when we force VirtualSpace to disable large pages.
1360     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1361     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1362     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1363     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1364     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1365     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1366     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1367 
1368     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1369     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1370     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1371     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1372     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1373     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1374     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1375 
1376     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1377     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1378     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1379     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1380     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1381     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1382     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1383   }
1384 
1385   static void test_virtual_space() {
1386     test_virtual_space_actual_committed_space();
1387     test_virtual_space_actual_committed_space_one_large_page();
1388     test_virtual_space_disable_large_pages();
1389   }
1390 };
1391 
1392 void TestVirtualSpace_test() {
1393   TestVirtualSpace::test_virtual_space();
1394 }
1395 
1396 #endif // PRODUCT
1397 
1398 #endif