1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/compressedOops.hpp"
  30 #include "oops/markWord.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/os.inline.hpp"
  33 #include "services/memTracker.hpp"
  34 #include "utilities/align.hpp"
  35 #include "utilities/powerOfTwo.hpp"
  36 
  37 // ReservedSpace
  38 
  39 // Dummy constructor
  40 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  41     _alignment(0), _special(false), _fd_for_heap(-1), _executable(false) {
  42 }
  43 
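// Reserve 'size' bytes of address space. If 'preferred_page_size' is non-zero it is used as the
// page size for the reservation (and the size is aligned up accordingly when this implies large
// pages); otherwise a page size is chosen based on the region size.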
  44 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  45   bool has_preferred_page_size = preferred_page_size != 0;
  46   // Want to use large pages where possible and pad with small pages.
  47   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  48   bool large_pages = page_size != (size_t)os::vm_page_size();
  49   size_t alignment;
  50   if (large_pages && has_preferred_page_size) {
  51     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  52     // ReservedSpace initialization requires size to be aligned to the given
  53     // alignment. Align the size up.
  54     size = align_up(size, alignment);
  55   } else {
  56     // Don't force the alignment to be large page aligned,
  57     // since that will waste memory.
  58     alignment = os::vm_allocation_granularity();
  59   }
  60   initialize(size, alignment, large_pages, NULL, false);
  61 }
  62 
  63 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  64                              bool large,
  65                              char* requested_address) : _fd_for_heap(-1) {
  66   initialize(size, alignment, large, requested_address, false);
  67 }
  68 
  69 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
  70                              bool special, bool executable) : _fd_for_heap(-1) {
  71   assert((size % os::vm_allocation_granularity()) == 0,
  72          "size not allocation aligned");
  73   _base = base;
  74   _size = size;
  75   _alignment = alignment;
  76   _noaccess_prefix = 0;
  77   _special = special;
  78   _executable = executable;
  79 }
  80 
// Helper method: unmap file-backed memory or release anonymous memory.
  82 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  83   if (is_file_mapped) {
  84     if (!os::unmap_memory(base, size)) {
  85       fatal("os::unmap_memory failed");
  86     }
  87   } else if (!os::release_memory(base, size)) {
  88     fatal("os::release_memory failed");
  89   }
  90 }
  91 
// Helper method: returns true if the memory was not reserved at the requested address; in that
// case any reservation that was made is released (or unmapped, if file-mapped) before returning.
  93 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  94                                            const size_t size, bool special, bool is_file_mapped = false)
  95 {
  96   if (base == requested_address || requested_address == NULL)
  97     return false; // did not fail
  98 
  99   if (base != NULL) {
    // A different reserve address may be acceptable in other cases,
    // but for compressed oops the heap should be at the requested address.
 102     assert(UseCompressedOops, "currently requested address used only for compressed oops");
 103     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
 104     // OS ignored requested address. Try different address.
 105     if (special) {
 106       if (!os::release_memory_special(base, size)) {
 107         fatal("os::release_memory_special failed");
 108       }
 109     } else {
 110       unmap_or_release_memory(base, size, is_file_mapped);
 111     }
 112   }
 113   return true;
 114 }
 115 
// Helper method: decides whether reserve_memory_special() must be used to reserve and pin the region.
static bool should_use_reserve_memory_special(bool large,
                                              int fd_for_heap,
                                              const char* fallback_log_message)
 120 {
  // If the OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  // If there is a backing file directory for this space then whether
  // large pages are allocated is up to the file system of the backing file.
  // So we ignore the UseLargePages flag in this case.
 126   bool special = large && !os::can_commit_large_page_memory();
 127   if (special && fd_for_heap != -1) {
 128     special = false;
 129     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 130                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 131       log_debug(gc, heap)("%s", fallback_log_message);
 132     }
 133   }
 134   return special;
 135 }
 136 
 137 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 138                                char* requested_address,
 139                                bool executable) {
 140   const size_t granularity = os::vm_allocation_granularity();
 141   assert((size & (granularity - 1)) == 0,
 142          "size not aligned to os::vm_allocation_granularity()");
 143   assert((alignment & (granularity - 1)) == 0,
 144          "alignment not aligned to os::vm_allocation_granularity()");
 145   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 146          "not a power of 2");
 147 
 148   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 149 
 150   _base = NULL;
 151   _size = 0;
 152   _special = false;
 153   _executable = executable;
 154   _alignment = 0;
 155   _noaccess_prefix = 0;
 156   if (size == 0) {
 157     return;
 158   }
 159 
  bool special = should_use_reserve_memory_special(large, _fd_for_heap,
 161                                                    "Ignoring UseLargePages since large page "
 162                                                    "support is up to the file system of the "
 163                                                    "backing file for Java heap");
 164 
 165   char* base = NULL;
 166 
 167   if (special) {
 168 
 169     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 170 
 171     if (base != NULL) {
 172       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 173         // OS ignored requested address. Try different address.
 174         return;
 175       }
 176       // Check alignment constraints.
 177       assert((uintptr_t) base % alignment == 0,
 178              "Large pages returned a non-aligned address, base: "
 179              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 180              p2i(base), alignment);
 181       _special = true;
 182     } else {
 183       // failed; try to reserve regular memory below
 184       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 185                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 186         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 187       }
 188     }
 189   }
 190 
 191   if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.
 199 
 200     if (requested_address != 0) {
 201       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 202       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 203         // OS ignored requested address. Try different address.
 204         base = NULL;
 205       }
 206     } else {
 207       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 208     }
 209 
 210     if (base == NULL) return;
 211 
 212     // Check alignment constraints
 213     if ((((size_t)base) & (alignment - 1)) != 0) {
 214       // Base not aligned, retry
 215       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 216 
 217       // Make sure that size is aligned
 218       size = align_up(size, alignment);
 219       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 220 
 221       if (requested_address != 0 &&
 222           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 223         // As a result of the alignment constraints, the allocated base differs
 224         // from the requested address. Return back to the caller who can
 225         // take remedial action (like try again without a requested address).
 226         assert(_base == NULL, "should be");
 227         return;
 228       }
 229     }
 230   }
 231   // Done
 232   _base = base;
 233   _size = size;
 234   _alignment = alignment;
  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 236   if (_fd_for_heap != -1) {
 237     _special = true;
 238   }
 239 }
 240 
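// Split off the first 'partition_size' bytes of this reserved space as a separate ReservedSpace.
// If 'split' is requested and the partition is a proper, non-empty prefix, the underlying OS
// reservation is split as well via os::split_reserved_memory().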
 241 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment, bool split) {
 242   assert(partition_size <= size(), "partition failed");
 243   if (split && partition_size > 0 && partition_size < size()) {
 244     os::split_reserved_memory(base(), size(), partition_size);
 245   }
 246   ReservedSpace result(base(), partition_size, alignment, special(),
 247                        executable());
 248   return result;
 249 }
 250 
 251 
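// Return the remainder of this reserved space, starting 'partition_size' bytes in.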
 252 ReservedSpace
 253 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 254   assert(partition_size <= size(), "partition failed");
 255   ReservedSpace result(base() + partition_size, size() - partition_size,
 256                        alignment, special(), executable());
 257   return result;
 258 }
 259 
 260 
 261 size_t ReservedSpace::page_align_size_up(size_t size) {
 262   return align_up(size, os::vm_page_size());
 263 }
 264 
 265 
 266 size_t ReservedSpace::page_align_size_down(size_t size) {
 267   return align_down(size, os::vm_page_size());
 268 }
 269 
 270 
 271 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 272   return align_up(size, os::vm_allocation_granularity());
 273 }
 274 
 275 
 276 void ReservedSpace::release() {
 277   if (is_reserved()) {
 278     char *real_base = _base - _noaccess_prefix;
 279     const size_t real_size = _size + _noaccess_prefix;
 280     if (special()) {
 281       if (_fd_for_heap != -1) {
 282         os::unmap_memory(real_base, real_size);
 283       } else {
 284         os::release_memory_special(real_base, real_size);
 285       }
    } else {
 287       os::release_memory(real_base, real_size);
 288     }
 289     _base = NULL;
 290     _size = 0;
 291     _noaccess_prefix = 0;
 292     _alignment = 0;
 293     _special = false;
 294     _executable = false;
 295   }
 296 }
 297 
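// The noaccess prefix has to span whole pages and keep the heap base aligned, hence the least
// common multiple of the page size and the heap alignment.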
 298 static size_t noaccess_prefix_size(size_t alignment) {
 299   return lcm(os::vm_page_size(), alignment);
 300 }
 301 
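// Protect a "noaccess prefix" at the low end of the reservation and exclude it from the usable
// heap. Decoding a null narrow oop with a non-zero heap base yields the heap base address, so
// faulting accesses to this protected prefix let compressed oops keep using implicit null checks.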
 302 void ReservedHeapSpace::establish_noaccess_prefix() {
 303   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 310       // Protect memory at the base of the allocated region.
      // If special, the page was committed (this only matters on Windows).
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(CompressedOops::use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       CompressedOops::set_use_implicit_null_checks(false);
 322     }
 323   }
 324 
 325   _base += _noaccess_prefix;
 326   _size -= _noaccess_prefix;
 327   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 328 }
 329 
 330 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 331 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 332 // might still fulfill the wishes of the caller.
 333 // Assures the memory is aligned to 'alignment'.
// NOTE: If this ReservedHeapSpace already points to some reserved memory, that memory is freed first.
 335 void ReservedHeapSpace::try_reserve_heap(size_t size,
 336                                          size_t alignment,
 337                                          bool large,
 338                                          char* requested_address) {
 339   if (_base != NULL) {
 340     // We tried before, but we didn't like the address delivered.
 341     release();
 342   }
 343 
  bool special = should_use_reserve_memory_special(large, _fd_for_heap,
 345                                                    "Cannot allocate large pages for Java Heap "
 346                                                    "when AllocateHeapAt option is set.");
 347   char* base = NULL;
 348 
 349   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 350                              " heap of size " SIZE_FORMAT_HEX,
 351                              p2i(requested_address),
 352                              size);
 353 
 354   if (special) {
 355     base = os::reserve_memory_special(size, alignment, requested_address, false);
 356 
 357     if (base != NULL) {
 358       // Check alignment constraints.
 359       assert((uintptr_t) base % alignment == 0,
 360              "Large pages returned a non-aligned address, base: "
 361              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 362              p2i(base), alignment);
 363       _special = true;
 364     }
 365   }
 366 
 367   if (base == NULL) {
 368     // Failed; try to reserve regular memory below
 369     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 370                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 371       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 372     }
 373 
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid mapping over something
    // important.  If available space is not detected, return NULL.
 381 
 382     if (requested_address != 0) {
 383       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 384     } else {
 385       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 386     }
 387   }
 388   if (base == NULL) { return; }
 389 
 390   // Done
 391   _base = base;
 392   _size = size;
 393   _alignment = alignment;
 394 
  // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 396   if (_fd_for_heap != -1) {
 397     _special = true;
 398   }
 399 
 400   // Check alignment constraints
 401   if ((((size_t)base) & (alignment - 1)) != 0) {
 402     // Base not aligned, retry.
 403     release();
 404   }
 405 }
 406 
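// Try to reserve the heap at attach points in [lowest_start, highest_start], walking from the top
// down in steps of 'stepsize' (roughly HeapSearchSteps attempts at most), until a reservation lies
// within [aligned_heap_base_min_address, upper_bound) or the attach points are exhausted.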
 407 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 408                                           char *lowest_start,
 409                                           size_t attach_point_alignment,
 410                                           char *aligned_heap_base_min_address,
 411                                           char *upper_bound,
 412                                           size_t size,
 413                                           size_t alignment,
 414                                           bool large) {
 415   const size_t attach_range = highest_start - lowest_start;
  // Cap the number of attempts at what is actually possible;
  // at least one attempt is possible even for a zero-sized attach range.
 418   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 419   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 420 
 421   const size_t stepsize = (attach_range == 0) ? // Only one try.
 422     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 423 
 424   // Try attach points from top to bottom.
 425   char* attach_point = highest_start;
 426   while (attach_point >= lowest_start  &&
 427          attach_point <= highest_start &&  // Avoid wrap around.
 428          ((_base == NULL) ||
 429           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 430     try_reserve_heap(size, alignment, large, attach_point);
 431     attach_point -= stepsize;
 432   }
 433 }
 434 
 435 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 436 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 437 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
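// Note: SIZE_64K * SIZE_32G == 2^51 bytes, so the larger attach addresses below target the very
// high parts of the virtual address space available on some 64-bit platforms.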
 438 
 439 // Helper for heap allocation. Returns an array with addresses
 440 // (OS-specific) which are suited for disjoint base mode. Array is
 441 // NULL terminated.
 442 static char** get_attach_addresses_for_disjoint_mode() {
 443   static uint64_t addresses[] = {
 444      2 * SIZE_32G,
 445      3 * SIZE_32G,
 446      4 * SIZE_32G,
 447      8 * SIZE_32G,
 448     10 * SIZE_32G,
 449      1 * SIZE_64K * SIZE_32G,
 450      2 * SIZE_64K * SIZE_32G,
 451      3 * SIZE_64K * SIZE_32G,
 452      4 * SIZE_64K * SIZE_32G,
 453     16 * SIZE_64K * SIZE_32G,
 454     32 * SIZE_64K * SIZE_32G,
 455     34 * SIZE_64K * SIZE_32G,
 456     0
 457   };
 458 
  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
  // the array is sorted.
 461   uint i = 0;
 462   while (addresses[i] != 0 &&
 463          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 464     i++;
 465   }
 466   uint start = i;
 467 
 468   // Avoid more steps than requested.
 469   i = 0;
 470   while (addresses[start+i] != 0) {
 471     if (i == HeapSearchSteps) {
 472       addresses[start+i] = 0;
 473       break;
 474     }
 475     i++;
 476   }
 477 
 478   return (char**) &addresses[start];
 479 }
 480 
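// Reserve the Java heap so that compressed oops can be decoded as cheaply as possible. In order of
// preference: unscaled (heap ends below UnscaledOopHeapMax), zero-based (below OopEncodingHeapMax),
// disjoint base, and finally an arbitrary address; the latter modes require a noaccess prefix so
// that implicit null checks keep working.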
 481 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 482   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 483             "can not allocate compressed oop heap for this size");
 484   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 485 
 486   const size_t granularity = os::vm_allocation_granularity();
 487   assert((size & (granularity - 1)) == 0,
 488          "size not aligned to os::vm_allocation_granularity()");
 489   assert((alignment & (granularity - 1)) == 0,
 490          "alignment not aligned to os::vm_allocation_granularity()");
 491   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 492          "not a power of 2");
 493 
 494   // The necessary attach point alignment for generated wish addresses.
 495   // This is needed to increase the chance of attaching for mmap and shmat.
 496   const size_t os_attach_point_alignment =
 497     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 498     NOT_AIX(os::vm_allocation_granularity());
 499   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 500 
 501   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 502   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 503     noaccess_prefix_size(alignment) : 0;
 504 
 505   // Attempt to alloc at user-given address.
 506   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 507     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 508     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 509       release();
 510     }
 511   }
 512 
  // If the heap was reserved at HeapBaseMinAddress above, keep it; otherwise try other placements below.
 514   if (_base == NULL) {
 515 
 516     // Try to allocate the heap at addresses that allow efficient oop compression.
 517     // Different schemes are tried, in order of decreasing optimization potential.
 518     //
 519     // For this, try_reserve_heap() is called with the desired heap base addresses.
 520     // A call into the os layer to allocate at a given address can return memory
 521     // at a different address than requested.  Still, this might be memory at a useful
 522     // address. try_reserve_heap() always returns this allocated memory, as only here
 523     // the criteria for a good heap are checked.
 524 
 525     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 526     // Give it several tries from top of range to bottom.
 527     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 528 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 530       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 531       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 532       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 533                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 534     }
 535 
 536     // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
 539     char *zerobased_max = (char *)OopEncodingHeapMax;
 540     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 541     // For small heaps, save some space for compressed class pointer
 542     // space so it can be decoded with no base.
 543     if (UseCompressedClassPointers && !UseSharedSpaces &&
 544         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 545         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 546       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 547     }
 548 
 549     // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 551         ((_base == NULL) ||                        // No previous try succeeded.
 552          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 553 
      // Calculate the address range within which we try to attach (range of possible start addresses).
 555       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
      // Need to be careful here: size is not guaranteed to be less than
      // UnscaledOopHeapMax, so the unsigned subtraction below can wrap.
 558       char *lowest_start = aligned_heap_base_min_address;
 559       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 560       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 561         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 562       }
 563       lowest_start = align_up(lowest_start, attach_point_alignment);
 564       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 565                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 566     }
 567 
 568     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 569     // implement null checks.
 570     noaccess_prefix = noaccess_prefix_size(alignment);
 571 
    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint-base mode.
 573     char** addresses = get_attach_addresses_for_disjoint_mode();
 574     int i = 0;
 575     while (addresses[i] &&                                 // End of array not yet reached.
 576            ((_base == NULL) ||                             // No previous try succeeded.
 577             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 578              !CompressedOops::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 579       char* const attach_point = addresses[i];
 580       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 581       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 582       i++;
 583     }
 584 
 585     // Last, desperate try without any placement.
 586     if (_base == NULL) {
 587       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 588       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 589     }
 590   }
 591 }
 592 
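// Reserve space for the Java heap. If 'heap_allocation_directory' is non-NULL the heap is backed
// by a file created in that directory (see the AllocateHeapAt option) and the whole space ends up
// committed. Illustrative call, with 'heap_size' and 'heap_alignment' standing in for values
// computed by the caller: ReservedHeapSpace(heap_size, heap_alignment, UseLargePages, AllocateHeapAt).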
 593 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 594 
 595   if (size == 0) {
 596     return;
 597   }
 598 
 599   if (heap_allocation_directory != NULL) {
 600     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 601     if (_fd_for_heap == -1) {
 602       vm_exit_during_initialization(
 603         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 604     }
 605   }
 606 
 607   // Heap size should be aligned to alignment, too.
 608   guarantee(is_aligned(size, alignment), "set by caller");
 609 
 610   if (UseCompressedOops) {
 611     initialize_compressed_heap(size, alignment, large);
 612     if (_size > size) {
 613       // We allocated heap with noaccess prefix.
 614       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 615       // if we had to try at arbitrary address.
 616       establish_noaccess_prefix();
 617     }
 618   } else {
 619     initialize(size, alignment, large, NULL, false);
 620   }
 621 
 622   assert(markWord::encode_pointer_as_mark(_base).decode_pointer() == _base,
 623          "area must be distinguishable from marks for mark-sweep");
 624   assert(markWord::encode_pointer_as_mark(&_base[size]).decode_pointer() == &_base[size],
 625          "area must be distinguishable from marks for mark-sweep");
 626 
 627   if (base() != NULL) {
 628     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 629   }
 630 
 631   if (_fd_for_heap != -1) {
 632     os::close(_fd_for_heap);
 633   }
 634 }
 635 
 636 MemRegion ReservedHeapSpace::region() const {
 637   return MemRegion((HeapWord*)base(), (HeapWord*)end());
 638 }
 639 
// Reserve space for the code segment.  Same as the Java heap, except that we mark this space
// as executable.
 642 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 643                                      size_t rs_align,
 644                                      bool large) : ReservedSpace() {
 645   initialize(r_size, rs_align, large, /*requested address*/ NULL, /*executable*/ true);
 646   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 647 }
 648 
 649 // VirtualSpace
 650 
 651 VirtualSpace::VirtualSpace() {
 652   _low_boundary           = NULL;
 653   _high_boundary          = NULL;
 654   _low                    = NULL;
 655   _high                   = NULL;
 656   _lower_high             = NULL;
 657   _middle_high            = NULL;
 658   _upper_high             = NULL;
 659   _lower_high_boundary    = NULL;
 660   _middle_high_boundary   = NULL;
 661   _upper_high_boundary    = NULL;
 662   _lower_alignment        = 0;
 663   _middle_alignment       = 0;
 664   _upper_alignment        = 0;
 665   _special                = false;
 666   _executable             = false;
 667 }
 668 
 669 
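// Initialize with the default commit granularity: the largest page size that fits the reserved
// region.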
 670 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 671   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 672   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 673 }
 674 
 675 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // Allocation failed.
 677   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 678   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 679 
 680   _low_boundary  = rs.base();
 681   _high_boundary = low_boundary() + rs.size();
 682 
 683   _low = low_boundary();
 684   _high = low();
 685 
 686   _special = rs.special();
 687   _executable = rs.executable();
 688 
 689   // When a VirtualSpace begins life at a large size, make all future expansion
 690   // and shrinking occur aligned to a granularity of large pages.  This avoids
 691   // fragmentation of physical addresses that inhibits the use of large pages
 692   // by the OS virtual memory system.  Empirically,  we see that with a 4MB
 693   // page size, the only spaces that get handled this way are codecache and
 694   // the heap itself, both of which provide a substantial performance
 695   // boost in many benchmarks when covered by large pages.
 696   //
 697   // No attempt is made to force large page alignment at the very top and
 698   // bottom of the space if they are not aligned so already.
 699   _lower_alignment  = os::vm_page_size();
 700   _middle_alignment = max_commit_granularity;
 701   _upper_alignment  = os::vm_page_size();
 702 
 703   // End of each region
 704   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 705   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 706   _upper_high_boundary = high_boundary();
 707 
 708   // High address of each region
 709   _lower_high = low_boundary();
 710   _middle_high = lower_high_boundary();
 711   _upper_high = middle_high_boundary();
 712 
 713   // commit to initial size
 714   if (committed_size > 0) {
 715     if (!expand_by(committed_size)) {
 716       return false;
 717     }
 718   }
 719   return true;
 720 }
 721 
 722 
 723 VirtualSpace::~VirtualSpace() {
 724   release();
 725 }
 726 
 727 
 728 void VirtualSpace::release() {
 729   // This does not release memory it reserved.
 730   // Caller must release via rs.release();
 731   _low_boundary           = NULL;
 732   _high_boundary          = NULL;
 733   _low                    = NULL;
 734   _high                   = NULL;
 735   _lower_high             = NULL;
 736   _middle_high            = NULL;
 737   _upper_high             = NULL;
 738   _lower_high_boundary    = NULL;
 739   _middle_high_boundary   = NULL;
 740   _upper_high_boundary    = NULL;
 741   _lower_alignment        = 0;
 742   _middle_alignment       = 0;
 743   _upper_alignment        = 0;
 744   _special                = false;
 745   _executable             = false;
 746 }
 747 
 748 
 749 size_t VirtualSpace::committed_size() const {
 750   return pointer_delta(high(), low(), sizeof(char));
 751 }
 752 
 753 
 754 size_t VirtualSpace::reserved_size() const {
 755   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 756 }
 757 
 758 
 759 size_t VirtualSpace::uncommitted_size()  const {
 760   return reserved_size() - committed_size();
 761 }
 762 
 763 size_t VirtualSpace::actual_committed_size() const {
 764   // Special VirtualSpaces commit all reserved space up front.
 765   if (special()) {
 766     return reserved_size();
 767   }
 768 
 769   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 770   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 771   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 772 
 773 #ifdef ASSERT
 774   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 775   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 776   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 777 
 778   if (committed_high > 0) {
 779     assert(committed_low == lower, "Must be");
 780     assert(committed_middle == middle, "Must be");
 781   }
 782 
 783   if (committed_middle > 0) {
 784     assert(committed_low == lower, "Must be");
 785   }
 786   if (committed_middle < middle) {
 787     assert(committed_high == 0, "Must be");
 788   }
 789 
 790   if (committed_low < lower) {
 791     assert(committed_high == 0, "Must be");
 792     assert(committed_middle == 0, "Must be");
 793   }
 794 #endif
 795 
 796   return committed_low + committed_middle + committed_high;
 797 }
 798 
 799 
 800 bool VirtualSpace::contains(const void* p) const {
 801   return low() <= (const char*) p && (const char*) p < high();
 802 }
 803 
 804 static void pretouch_expanded_memory(void* start, void* end) {
 805   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 806   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 807 
 808   os::pretouch_memory(start, end);
 809 }
 810 
 811 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 812   if (os::commit_memory(start, size, alignment, executable)) {
 813     if (pre_touch || AlwaysPreTouch) {
 814       pretouch_expanded_memory(start, start + size);
 815     }
 816     return true;
 817   }
 818 
 819   debug_only(warning(
 820       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 821       " size=" SIZE_FORMAT ", executable=%d) failed",
 822       p2i(start), p2i(start + size), size, executable);)
 823 
 824   return false;
 825 }
 826 
/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large-page-size chunks.  If a particular request
   is within the current large page, the calls to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading up to the first large
   page address and the pages after the last large page address must be
   allocated with default (small) pages.
*/
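// For example, assuming 4K small pages and a 4M commit granularity: the lower region covers the 4K
// pages from low_boundary() up to the first 4M boundary, the middle region covers whole 4M pages,
// and the upper region covers the trailing 4K pages after the last 4M boundary below high_boundary().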
 839 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 840   if (uncommitted_size() < bytes) {
 841     return false;
 842   }
 843 
 844   if (special()) {
 845     // don't commit memory if the entire space is pinned in memory
 846     _high += bytes;
 847     return true;
 848   }
 849 
 850   char* previous_high = high();
 851   char* unaligned_new_high = high() + bytes;
 852   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 853 
 854   // Calculate where the new high for each of the regions should be.  If
 855   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 856   // then the unaligned lower and upper new highs would be the
 857   // lower_high() and upper_high() respectively.
 858   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 859   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 860   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 861 
  // Align the new highs based on each region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
 866   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 867   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 868   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 869 
 870   // Determine which regions need to grow in this expand_by call.
 871   // If you are growing in the lower region, high() must be in that
 872   // region so calculate the size based on high().  For the middle and
 873   // upper regions, determine the starting point of growth based on the
 874   // location of high().  By getting the MAX of the region's low address
 875   // (or the previous region's high address) and high(), we can tell if it
 876   // is an intra or inter region growth.
 877   size_t lower_needs = 0;
 878   if (aligned_lower_new_high > lower_high()) {
 879     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 880   }
 881   size_t middle_needs = 0;
 882   if (aligned_middle_new_high > middle_high()) {
 883     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 884   }
 885   size_t upper_needs = 0;
 886   if (aligned_upper_new_high > upper_high()) {
 887     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 888   }
 889 
 890   // Check contiguity.
 891   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 892          "high address must be contained within the region");
 893   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 894          "high address must be contained within the region");
 895   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 896          "high address must be contained within the region");
 897 
 898   // Commit regions
 899   if (lower_needs > 0) {
 900     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 901     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 902       return false;
 903     }
 904     _lower_high += lower_needs;
 905   }
 906 
 907   if (middle_needs > 0) {
 908     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 909     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 910       return false;
 911     }
 912     _middle_high += middle_needs;
 913   }
 914 
 915   if (upper_needs > 0) {
 916     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 917     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 918       return false;
 919     }
 920     _upper_high += upper_needs;
 921   }
 922 
 923   _high += bytes;
 924   return true;
 925 }
 926 
// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
 930 void VirtualSpace::shrink_by(size_t size) {
 931   if (committed_size() < size)
 932     fatal("Cannot shrink virtual space to negative size");
 933 
 934   if (special()) {
 935     // don't uncommit if the entire space is pinned in memory
 936     _high -= size;
 937     return;
 938   }
 939 
 940   char* unaligned_new_high = high() - size;
 941   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 942 
 943   // Calculate new unaligned address
 944   char* unaligned_upper_new_high =
 945     MAX2(unaligned_new_high, middle_high_boundary());
 946   char* unaligned_middle_new_high =
 947     MAX2(unaligned_new_high, lower_high_boundary());
 948   char* unaligned_lower_new_high =
 949     MAX2(unaligned_new_high, low_boundary());
 950 
 951   // Align address to region's alignment
 952   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 953   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 954   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 955 
 956   // Determine which regions need to shrink
 957   size_t upper_needs = 0;
 958   if (aligned_upper_new_high < upper_high()) {
 959     upper_needs =
 960       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 961   }
 962   size_t middle_needs = 0;
 963   if (aligned_middle_new_high < middle_high()) {
 964     middle_needs =
 965       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 966   }
 967   size_t lower_needs = 0;
 968   if (aligned_lower_new_high < lower_high()) {
 969     lower_needs =
 970       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 971   }
 972 
 973   // Check contiguity.
 974   assert(middle_high_boundary() <= upper_high() &&
 975          upper_high() <= upper_high_boundary(),
 976          "high address must be contained within the region");
 977   assert(lower_high_boundary() <= middle_high() &&
 978          middle_high() <= middle_high_boundary(),
 979          "high address must be contained within the region");
 980   assert(low_boundary() <= lower_high() &&
 981          lower_high() <= lower_high_boundary(),
 982          "high address must be contained within the region");
 983 
 984   // Uncommit
 985   if (upper_needs > 0) {
 986     assert(middle_high_boundary() <= aligned_upper_new_high &&
 987            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 988            "must not shrink beyond region");
 989     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 990       debug_only(warning("os::uncommit_memory failed"));
 991       return;
 992     } else {
 993       _upper_high -= upper_needs;
 994     }
 995   }
 996   if (middle_needs > 0) {
 997     assert(lower_high_boundary() <= aligned_middle_new_high &&
 998            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 999            "must not shrink beyond region");
1000     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1001       debug_only(warning("os::uncommit_memory failed"));
1002       return;
1003     } else {
1004       _middle_high -= middle_needs;
1005     }
1006   }
1007   if (lower_needs > 0) {
1008     assert(low_boundary() <= aligned_lower_new_high &&
1009            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1010            "must not shrink beyond region");
1011     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1012       debug_only(warning("os::uncommit_memory failed"));
1013       return;
1014     } else {
1015       _lower_high -= lower_needs;
1016     }
1017   }
1018 
1019   _high -= size;
1020 }
1021 
1022 #ifndef PRODUCT
1023 void VirtualSpace::check_for_contiguity() {
1024   // Check contiguity.
1025   assert(low_boundary() <= lower_high() &&
1026          lower_high() <= lower_high_boundary(),
1027          "high address must be contained within the region");
1028   assert(lower_high_boundary() <= middle_high() &&
1029          middle_high() <= middle_high_boundary(),
1030          "high address must be contained within the region");
1031   assert(middle_high_boundary() <= upper_high() &&
1032          upper_high() <= upper_high_boundary(),
1033          "high address must be contained within the region");
1034   assert(low() >= low_boundary(), "low");
1035   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1036   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1037   assert(high() <= upper_high(), "upper high");
1038 }
1039 
1040 void VirtualSpace::print_on(outputStream* out) {
1041   out->print   ("Virtual space:");
1042   if (special()) out->print(" (pinned in memory)");
1043   out->cr();
1044   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1045   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1046   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1047   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1048 }
1049 
1050 void VirtualSpace::print() {
1051   print_on(tty);
1052 }
1053 
1054 /////////////// Unit tests ///////////////
1055 
1056 #ifndef PRODUCT
1057 
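// Exercises ReservedSpace with various sizes, alignments and large-page settings, touching the
// memory with small-page writes when the reservation is pre-committed ('special').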
1058 class TestReservedSpace : AllStatic {
1059  public:
1060   static void small_page_write(void* addr, size_t size) {
1061     size_t page_size = os::vm_page_size();
1062 
1063     char* end = (char*)addr + size;
1064     for (char* p = (char*)addr; p < end; p += page_size) {
1065       *p = 1;
1066     }
1067   }
1068 
1069   static void release_memory_for_test(ReservedSpace rs) {
1070     if (rs.special()) {
1071       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1072     } else {
1073       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1074     }
1075   }
1076 
1077   static void test_reserved_space1(size_t size, size_t alignment) {
1078     assert(is_aligned(size, alignment), "Incorrect input parameters");
1079 
1080     ReservedSpace rs(size,          // size
1081                      alignment,     // alignment
1082                      UseLargePages, // large
1083                      (char *)NULL); // requested_address
1084 
1085     assert(rs.base() != NULL, "Must be");
1086     assert(rs.size() == size, "Must be");
1087 
1088     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1089     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1090 
1091     if (rs.special()) {
1092       small_page_write(rs.base(), size);
1093     }
1094 
1095     release_memory_for_test(rs);
1096   }
1097 
1098   static void test_reserved_space2(size_t size) {
1099     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1100 
1101     ReservedSpace rs(size);
1102 
1103     assert(rs.base() != NULL, "Must be");
1104     assert(rs.size() == size, "Must be");
1105 
1106     if (rs.special()) {
1107       small_page_write(rs.base(), size);
1108     }
1109 
1110     release_memory_for_test(rs);
1111   }
1112 
1113   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1114     if (size < alignment) {
1115       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1116       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1117       return;
1118     }
1119 
1120     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1121     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1122 
1123     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1124 
1125     ReservedSpace rs(size, alignment, large);
1126 
1127     assert(rs.base() != NULL, "Must be");
1128     assert(rs.size() == size, "Must be");
1129 
1130     if (rs.special()) {
1131       small_page_write(rs.base(), size);
1132     }
1133 
1134     release_memory_for_test(rs);
1135   }
1136 
1137 
1138   static void test_reserved_space1() {
1139     size_t size = 2 * 1024 * 1024;
1140     size_t ag   = os::vm_allocation_granularity();
1141 
1142     test_reserved_space1(size,      ag);
1143     test_reserved_space1(size * 2,  ag);
1144     test_reserved_space1(size * 10, ag);
1145   }
1146 
1147   static void test_reserved_space2() {
1148     size_t size = 2 * 1024 * 1024;
1149     size_t ag = os::vm_allocation_granularity();
1150 
1151     test_reserved_space2(size * 1);
1152     test_reserved_space2(size * 2);
1153     test_reserved_space2(size * 10);
1154     test_reserved_space2(ag);
1155     test_reserved_space2(size - ag);
1156     test_reserved_space2(size);
1157     test_reserved_space2(size + ag);
1158     test_reserved_space2(size * 2);
1159     test_reserved_space2(size * 2 - ag);
1160     test_reserved_space2(size * 2 + ag);
1161     test_reserved_space2(size * 3);
1162     test_reserved_space2(size * 3 - ag);
1163     test_reserved_space2(size * 3 + ag);
1164     test_reserved_space2(size * 10);
1165     test_reserved_space2(size * 10 + size / 2);
1166   }
1167 
1168   static void test_reserved_space3() {
1169     size_t ag = os::vm_allocation_granularity();
1170 
1171     test_reserved_space3(ag,      ag    , false);
1172     test_reserved_space3(ag * 2,  ag    , false);
1173     test_reserved_space3(ag * 3,  ag    , false);
1174     test_reserved_space3(ag * 2,  ag * 2, false);
1175     test_reserved_space3(ag * 4,  ag * 2, false);
1176     test_reserved_space3(ag * 8,  ag * 2, false);
1177     test_reserved_space3(ag * 4,  ag * 4, false);
1178     test_reserved_space3(ag * 8,  ag * 4, false);
1179     test_reserved_space3(ag * 16, ag * 4, false);
1180 
1181     if (UseLargePages) {
1182       size_t lp = os::large_page_size();
1183 
1184       // Without large pages
1185       test_reserved_space3(lp,     ag * 4, false);
1186       test_reserved_space3(lp * 2, ag * 4, false);
1187       test_reserved_space3(lp * 4, ag * 4, false);
1188       test_reserved_space3(lp,     lp    , false);
1189       test_reserved_space3(lp * 2, lp    , false);
1190       test_reserved_space3(lp * 3, lp    , false);
1191       test_reserved_space3(lp * 2, lp * 2, false);
1192       test_reserved_space3(lp * 4, lp * 2, false);
1193       test_reserved_space3(lp * 8, lp * 2, false);
1194 
1195       // With large pages
1196       test_reserved_space3(lp, ag * 4    , true);
1197       test_reserved_space3(lp * 2, ag * 4, true);
1198       test_reserved_space3(lp * 4, ag * 4, true);
1199       test_reserved_space3(lp, lp        , true);
1200       test_reserved_space3(lp * 2, lp    , true);
1201       test_reserved_space3(lp * 3, lp    , true);
1202       test_reserved_space3(lp * 2, lp * 2, true);
1203       test_reserved_space3(lp * 4, lp * 2, true);
1204       test_reserved_space3(lp * 8, lp * 2, true);
1205     }
1206   }
1207 
1208   static void test_reserved_space() {
1209     test_reserved_space1();
1210     test_reserved_space2();
1211     test_reserved_space3();
1212   }
1213 };
1214 
1215 void TestReservedSpace_test() {
1216   TestReservedSpace::test_reserved_space();
1217 }
1218 
1219 #define assert_equals(actual, expected)  \
1220   assert(actual == expected,             \
1221          "Got " SIZE_FORMAT " expected " \
1222          SIZE_FORMAT, actual, expected);
1223 
1224 #define assert_ge(value1, value2)                  \
1225   assert(value1 >= value2,                         \
1226          "'" #value1 "': " SIZE_FORMAT " '"        \
1227          #value2 "': " SIZE_FORMAT, value1, value2);
1228 
1229 #define assert_lt(value1, value2)                  \
1230   assert(value1 < value2,                          \
1231          "'" #value1 "': " SIZE_FORMAT " '"        \
1232          #value2 "': " SIZE_FORMAT, value1, value2);
1233 
1234 
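// Exercises VirtualSpace commit behavior: how much memory is actually committed for different
// reserve/commit sizes and large-page modes.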
1235 class TestVirtualSpace : AllStatic {
1236   enum TestLargePages {
1237     Default,
1238     Disable,
1239     Reserve,
1240     Commit
1241   };
1242 
1243   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch (mode) {
1245     default:
1246     case Default:
1247     case Reserve:
1248       return ReservedSpace(reserve_size_aligned);
1249     case Disable:
1250     case Commit:
1251       return ReservedSpace(reserve_size_aligned,
1252                            os::vm_allocation_granularity(),
1253                            /* large */ false);
1254     }
1255   }
1256 
1257   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch (mode) {
1259     default:
1260     case Default:
1261     case Reserve:
1262       return vs.initialize(rs, 0);
1263     case Disable:
1264       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1265     case Commit:
1266       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1267     }
1268   }
1269 
1270  public:
1271   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1272                                                         TestLargePages mode = Default) {
1273     size_t granularity = os::vm_allocation_granularity();
1274     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1275 
1276     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1277 
1278     assert(reserved.is_reserved(), "Must be");
1279 
1280     VirtualSpace vs;
1281     bool initialized = initialize_virtual_space(vs, reserved, mode);
1282     assert(initialized, "Failed to initialize VirtualSpace");
1283 
1284     vs.expand_by(commit_size, false);
1285 
1286     if (vs.special()) {
1287       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1288     } else {
1289       assert_ge(vs.actual_committed_size(), commit_size);
1290       // Approximate the commit granularity.
1291       // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
1293       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1294                                    os::vm_page_size() : os::large_page_size();
1295       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1296     }
1297 
1298     reserved.release();
1299   }
1300 
1301   static void test_virtual_space_actual_committed_space_one_large_page() {
1302     if (!UseLargePages) {
1303       return;
1304     }
1305 
1306     size_t large_page_size = os::large_page_size();
1307 
1308     ReservedSpace reserved(large_page_size, large_page_size, true);
1309 
1310     assert(reserved.is_reserved(), "Must be");
1311 
1312     VirtualSpace vs;
1313     bool initialized = vs.initialize(reserved, 0);
1314     assert(initialized, "Failed to initialize VirtualSpace");
1315 
1316     vs.expand_by(large_page_size, false);
1317 
1318     assert_equals(vs.actual_committed_size(), large_page_size);
1319 
1320     reserved.release();
1321   }
1322 
1323   static void test_virtual_space_actual_committed_space() {
1324     test_virtual_space_actual_committed_space(4 * K, 0);
1325     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1326     test_virtual_space_actual_committed_space(8 * K, 0);
1327     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1328     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1329     test_virtual_space_actual_committed_space(12 * K, 0);
1330     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1331     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1332     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1333     test_virtual_space_actual_committed_space(64 * K, 0);
1334     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1335     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1336     test_virtual_space_actual_committed_space(2 * M, 0);
1337     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1338     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1339     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1340     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1341     test_virtual_space_actual_committed_space(10 * M, 0);
1342     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1343     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1344     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1345     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1346     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1347     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1348   }
1349 
1350   static void test_virtual_space_disable_large_pages() {
1351     if (!UseLargePages) {
1352       return;
1353     }
    // These test cases verify that the commit granularity stays at the small page size
    // if we force VirtualSpace to disable large pages.
1355     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1356     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1357     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1358     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1359     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1360     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1361     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1362 
1363     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1364     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1365     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1366     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1367     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1368     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1369     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1370 
1371     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1372     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1373     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1374     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1375     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1376     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1377     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1378   }
1379 
1380   static void test_virtual_space() {
1381     test_virtual_space_actual_committed_space();
1382     test_virtual_space_actual_committed_space_one_large_page();
1383     test_virtual_space_disable_large_pages();
1384   }
1385 };
1386 
1387 void TestVirtualSpace_test() {
1388   TestVirtualSpace::test_virtual_space();
1389 }
1390 
1391 #endif // PRODUCT
1392 
#endif // PRODUCT