1 /*
   2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "logging/log.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "memory/virtualspace.hpp"
  29 #include "oops/markOop.hpp"
  30 #include "oops/oop.inline.hpp"
  31 #include "services/memTracker.hpp"
  32 #include "utilities/align.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false), _fd_for_heap(-1) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _fd_for_heap(-1) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) : _fd_for_heap(-1) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) : _fd_for_heap(-1) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method
  73 static void unmap_or_release_memory(char* base, size_t size, bool is_file_mapped) {
  74   if (is_file_mapped) {
  75     if (!os::unmap_memory(base, size)) {
  76       fatal("os::unmap_memory failed");
  77     }
  78   } else if (!os::release_memory(base, size)) {
  79     fatal("os::release_memory failed");
  80   }
  81 }
  82 
  83 // Helper method.
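     // Returns true, releasing any memory that was obtained, if a specific
     // address was requested but the reservation ended up elsewhere; returns
     // false if no address was requested or the request was honored.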
  84 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  85                                            const size_t size, bool special, bool is_file_mapped = false)
  86 {
  87   if (base == requested_address || requested_address == NULL)
  88     return false; // did not fail
  89 
  90   if (base != NULL) {
  91     // A different reserve address may be acceptable in other cases,
  92     // but for compressed oops the heap should be at the requested address.
  93     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  94     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  95     // OS ignored requested address. Try different address.
  96     if (special) {
  97       if (!os::release_memory_special(base, size)) {
  98         fatal("os::release_memory_special failed");
  99       }
 100     } else {
 101       unmap_or_release_memory(base, size, is_file_mapped);
 102     }
 103   }
 104   return true;
 105 }
 106 
 107 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 108                                char* requested_address,
 109                                bool executable) {
 110   const size_t granularity = os::vm_allocation_granularity();
 111   assert((size & (granularity - 1)) == 0,
 112          "size not aligned to os::vm_allocation_granularity()");
 113   assert((alignment & (granularity - 1)) == 0,
 114          "alignment not aligned to os::vm_allocation_granularity()");
 115   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 116          "not a power of 2");
 117 
 118   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 119 
 120   _base = NULL;
 121   _size = 0;
 122   _special = false;
 123   _executable = executable;
 124   _alignment = 0;
 125   _noaccess_prefix = 0;
 126   if (size == 0) {
 127     return;
 128   }
 129 
 130   // If the OS doesn't support demand paging for large page memory, we need
 131   // to use reserve_memory_special() to reserve and pin the entire region.
 132   // If there is a backing file directory for this space, then whether
 133   // large pages are allocated is up to the filesystem of the backing file,
 134   // so we ignore the UseLargePages flag in this case.
 135   bool special = large && !os::can_commit_large_page_memory();
 136   if (special && _fd_for_heap != -1) {
 137     special = false;
 138     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 139       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 140       log_debug(gc, heap)("Ignoring UseLargePages since large page support is up to the file system of the backing file for Java heap");
 141     }
 142   }
 143 
 144   char* base = NULL;
 145 
 146   if (special) {
 147 
 148     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 149 
 150     if (base != NULL) {
 151       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 152         // OS ignored requested address. Try different address.
 153         return;
 154       }
 155       // Check alignment constraints.
 156       assert((uintptr_t) base % alignment == 0,
 157              "Large pages returned a non-aligned address, base: "
 158              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 159              p2i(base), alignment);
 160       _special = true;
 161     } else {
 162       // failed; try to reserve regular memory below
 163       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 164                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 165         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 166       }
 167     }
 168   }
 169 
 170   if (base == NULL) {
 171     // Optimistically assume that the OS returns an aligned base pointer.
 172     // When reserving a large address range, most OSes seem to align to at
 173     // least 64K.
 174 
 175     // If the memory was requested at a particular address, use
 176     // os::attempt_reserve_memory_at() to avoid mapping over something
 177     // important.  If available space is not detected, return NULL.
 178 
 179     if (requested_address != 0) {
 180       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 181       if (failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 182         // OS ignored requested address. Try different address.
 183         base = NULL;
 184       }
 185     } else {
 186       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 187     }
 188 
 189     if (base == NULL) return;
 190 
 191     // Check alignment constraints
 192     if ((((size_t)base) & (alignment - 1)) != 0) {
 193       // Base not aligned, retry
 194       unmap_or_release_memory(base, size, _fd_for_heap != -1 /*is_file_mapped*/);
 195 
 196       // Make sure that size is aligned
 197       size = align_up(size, alignment);
 198       base = os::reserve_memory_aligned(size, alignment, _fd_for_heap);
 199 
 200       if (requested_address != 0 &&
 201           failed_to_reserve_as_requested(base, requested_address, size, false, _fd_for_heap != -1)) {
 202         // As a result of the alignment constraints, the allocated base differs
 203         // from the requested address. Return back to the caller who can
 204         // take remedial action (like try again without a requested address).
 205         assert(_base == NULL, "should be");
 206         return;
 207       }
 208     }
 209   }
 210   // Done
 211   _base = base;
 212   _size = size;
 213   _alignment = alignment;
 214   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 215   if (_fd_for_heap != -1) {
 216     _special = true;
 217   }
 218 }
 219 
 220 
 221 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 222                              bool special, bool executable) {
 223   assert((size % os::vm_allocation_granularity()) == 0,
 224          "size not allocation aligned");
 225   _base = base;
 226   _size = size;
 227   _alignment = alignment;
 228   _noaccess_prefix = 0;
 229   _special = special;
 230   _executable = executable;
 231 }
 232 
 233 
 234 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 235                                         bool split, bool realloc) {
 236   assert(partition_size <= size(), "partition failed");
 237   if (split) {
 238     os::split_reserved_memory(base(), size(), partition_size, realloc);
 239   }
 240   ReservedSpace result(base(), partition_size, alignment, special(),
 241                        executable());
 242   return result;
 243 }
 244 
 245 
 246 ReservedSpace
 247 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 248   assert(partition_size <= size(), "partition failed");
 249   ReservedSpace result(base() + partition_size, size() - partition_size,
 250                        alignment, special(), executable());
 251   return result;
 252 }
 253 
 254 
 255 size_t ReservedSpace::page_align_size_up(size_t size) {
 256   return align_up(size, os::vm_page_size());
 257 }
 258 
 259 
 260 size_t ReservedSpace::page_align_size_down(size_t size) {
 261   return align_down(size, os::vm_page_size());
 262 }
 263 
 264 
 265 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 266   return align_up(size, os::vm_allocation_granularity());
 267 }
 268 
 269 
 270 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 271   return align_down(size, os::vm_allocation_granularity());
 272 }
 273 
 274 
 275 void ReservedSpace::release() {
 276   if (is_reserved()) {
 277     char *real_base = _base - _noaccess_prefix;
 278     const size_t real_size = _size + _noaccess_prefix;
 279     if (special()) {
 280       if (_fd_for_heap != -1) {
 281         os::unmap_memory(real_base, real_size);
 282       } else {
 283         os::release_memory_special(real_base, real_size);
 284       }
 285     } else {
 286       os::release_memory(real_base, real_size);
 287     }
 288     _base = NULL;
 289     _size = 0;
 290     _noaccess_prefix = 0;
 291     _alignment = 0;
 292     _special = false;
 293     _executable = false;
 294   }
 295 }
 296 
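     // The noaccess prefix must cover whole OS pages so it can be protected, and
     // it must keep the heap base aligned once it is skipped, so the least common
     // multiple of page size and heap alignment is used. For example (illustrative
     // values), a 4K page size and a 2M heap alignment give a 2M prefix.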
 297 static size_t noaccess_prefix_size(size_t alignment) {
 298   return lcm(os::vm_page_size(), alignment);
 299 }
 300 
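     // The protected prefix supports implicit null checks with compressed oops:
     // a compressed null decodes to the narrow oop base at the start of the
     // prefix, so an access through it faults in the protected page(s) and no
     // explicit null check is needed (see the use of
     // narrow_oop_use_implicit_null_checks() below).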
 301 void ReservedHeapSpace::establish_noaccess_prefix() {
 302   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 303   _noaccess_prefix = noaccess_prefix_size(_alignment);
 304 
 305   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 306     if (true
 307         WIN64_ONLY(&& !UseLargePages)
 308         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 309       // Protect memory at the base of the allocated region.
 310       // If special, the page was committed (only matters on Windows)
 311       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 312         fatal("cannot protect protection page");
 313       }
 314       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 315                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 316                                  p2i(_base),
 317                                  _noaccess_prefix);
 318       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 319     } else {
 320       Universe::set_narrow_oop_use_implicit_null_checks(false);
 321     }
 322   }
 323 
 324   _base += _noaccess_prefix;
 325   _size -= _noaccess_prefix;
 326   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 327 }
 328 
 329 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 330 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 331 // might still fulfill the wishes of the caller.
 332 // Assures the memory is aligned to 'alignment'.
 333 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 334 void ReservedHeapSpace::try_reserve_heap(size_t size,
 335                                          size_t alignment,
 336                                          bool large,
 337                                          char* requested_address) {
 338   if (_base != NULL) {
 339     // We tried before, but we didn't like the address delivered.
 340     release();
 341   }
 342 
 343   // If the OS doesn't support demand paging for large page memory, we need
 344   // to use reserve_memory_special() to reserve and pin the entire region.
 345   // If there is a backing file directory for this space, then whether
 346   // large pages are allocated is up to the filesystem of the backing file,
 347   // so we ignore the UseLargePages flag in this case.
 348   bool special = large && !os::can_commit_large_page_memory();
 349   if (special && _fd_for_heap != -1) {
 350     special = false;
 351     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 352                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 353       log_debug(gc, heap)("Cannot allocate large pages for Java Heap when AllocateHeapAt option is set.");
 354     }
 355   }
 356   char* base = NULL;
 357 
 358   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 359                              " heap of size " SIZE_FORMAT_HEX,
 360                              p2i(requested_address),
 361                              size);
 362 
 363   if (special) {
 364     base = os::reserve_memory_special(size, alignment, requested_address, false);
 365 
 366     if (base != NULL) {
 367       // Check alignment constraints.
 368       assert((uintptr_t) base % alignment == 0,
 369              "Large pages returned a non-aligned address, base: "
 370              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 371              p2i(base), alignment);
 372       _special = true;
 373     }
 374   }
 375 
 376   if (base == NULL) {
 377     // Failed; try to reserve regular memory below
 378     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 379                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 380       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 381     }
 382 
 383     // Optimistically assume that the OS returns an aligned base pointer.
 384     // When reserving a large address range, most OSes seem to align to at
 385     // least 64K.
 386 
 387     // If the memory was requested at a particular address, use
 388     // os::attempt_reserve_memory_at() to avoid mapping over something
 389     // important.  If available space is not detected, return NULL.
 390 
 391     if (requested_address != 0) {
 392       base = os::attempt_reserve_memory_at(size, requested_address, _fd_for_heap);
 393     } else {
 394       base = os::reserve_memory(size, NULL, alignment, _fd_for_heap);
 395     }
 396   }
 397   if (base == NULL) { return; }
 398 
 399   // Done
 400   _base = base;
 401   _size = size;
 402   _alignment = alignment;
 403 
 404   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 405   if (_fd_for_heap != -1) {
 406     _special = true;
 407   }
 408 
 409   // Check alignment constraints
 410   if ((((size_t)base) & (alignment - 1)) != 0) {
 411     // Base not aligned, retry.
 412     release();
 413   }
 414 }
 415 
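     // Walks candidate attach points from highest_start down to lowest_start in
     // steps of stepsize, stopping once a reservation that fits within
     // [aligned_heap_base_min_address, upper_bound) has been obtained. As a rough
     // illustration, with a 3G attach range, HeapSearchSteps=3 and a 64K attach
     // point alignment, stepsize works out to 1G, so attach points 1G apart are
     // tried from the top of the range downwards.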
 416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 417                                           char *lowest_start,
 418                                           size_t attach_point_alignment,
 419                                           char *aligned_heap_base_min_address,
 420                                           char *upper_bound,
 421                                           size_t size,
 422                                           size_t alignment,
 423                                           bool large) {
 424   const size_t attach_range = highest_start - lowest_start;
 425   // Cap num_attempts at the number of possible attach points.
 426   // At least one attempt is possible even for a zero-sized attach range.
 427   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 428   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 429 
 430   const size_t stepsize = (attach_range == 0) ? // Only one try.
 431     (size_t) highest_start : align_up(attach_range / num_attempts_to_try, attach_point_alignment);
 432 
 433   // Try attach points from top to bottom.
 434   char* attach_point = highest_start;
 435   while (attach_point >= lowest_start  &&
 436          attach_point <= highest_start &&  // Avoid wrap around.
 437          ((_base == NULL) ||
 438           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 439     try_reserve_heap(size, alignment, large, attach_point);
 440     attach_point -= stepsize;
 441   }
 442 }
 443 
 444 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 445 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 446 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 447 
 448 // Helper for heap allocation. Returns an array with addresses
 449 // (OS-specific) which are suited for disjoint base mode. Array is
 450 // NULL terminated.
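     // Disjoint base mode (see Universe::is_disjoint_heap_base_address()): the
     // heap base is a multiple of OopEncodingHeapMax (32G with the default 3-bit
     // shift), so its non-zero bits lie entirely above the bits occupied by a
     // shifted narrow oop and base and oop can be combined cheaply when decoding.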
 451 static char** get_attach_addresses_for_disjoint_mode() {
 452   static uint64_t addresses[] = {
 453      2 * SIZE_32G,
 454      3 * SIZE_32G,
 455      4 * SIZE_32G,
 456      8 * SIZE_32G,
 457     10 * SIZE_32G,
 458      1 * SIZE_64K * SIZE_32G,
 459      2 * SIZE_64K * SIZE_32G,
 460      3 * SIZE_64K * SIZE_32G,
 461      4 * SIZE_64K * SIZE_32G,
 462     16 * SIZE_64K * SIZE_32G,
 463     32 * SIZE_64K * SIZE_32G,
 464     34 * SIZE_64K * SIZE_32G,
 465     0
 466   };
 467 
 468   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 469   // assumes the array is sorted in ascending order.
 470   uint i = 0;
 471   while (addresses[i] != 0 &&
 472          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 473     i++;
 474   }
 475   uint start = i;
 476 
 477   // Avoid more steps than requested.
 478   i = 0;
 479   while (addresses[start+i] != 0) {
 480     if (i == HeapSearchSteps) {
 481       addresses[start+i] = 0;
 482       break;
 483     }
 484     i++;
 485   }
 486 
 487   return (char**) &addresses[start];
 488 }
 489 
 490 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 491   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 492             "can not allocate compressed oop heap for this size");
 493   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 494   assert(HeapBaseMinAddress > 0, "sanity");
 495 
 496   const size_t granularity = os::vm_allocation_granularity();
 497   assert((size & (granularity - 1)) == 0,
 498          "size not aligned to os::vm_allocation_granularity()");
 499   assert((alignment & (granularity - 1)) == 0,
 500          "alignment not aligned to os::vm_allocation_granularity()");
 501   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 502          "not a power of 2");
 503 
 504   // The necessary attach point alignment for generated wish addresses.
 505   // This is needed to increase the chance of attaching for mmap and shmat.
 506   const size_t os_attach_point_alignment =
 507     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 508     NOT_AIX(os::vm_allocation_granularity());
 509   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
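       // E.g. (illustrative values) with a 2M heap alignment this is
       // lcm(2M, 256M) = 256M on AIX and simply 2M with a typical 4K or 64K
       // allocation granularity elsewhere.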
 510 
 511   char *aligned_heap_base_min_address = (char *)align_up((void *)HeapBaseMinAddress, alignment);
 512   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 513     noaccess_prefix_size(alignment) : 0;
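       // A prefix is only needed if even at the minimum base the heap would end
       // above OopEncodingHeapMax, i.e. if a heap-based (non-zero base) encoding
       // may be required.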
 514 
 515   // Attempt to alloc at user-given address.
 516   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 517     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 518     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 519       release();
 520     }
 521   }
 522 
 523   // Keep heap at HeapBaseMinAddress.
 524   if (_base == NULL) {
 525 
 526     // Try to allocate the heap at addresses that allow efficient oop compression.
 527     // Different schemes are tried, in order of decreasing optimization potential.
 528     //
 529     // For this, try_reserve_heap() is called with the desired heap base addresses.
 530     // A call into the os layer to allocate at a given address can return memory
 531     // at a different address than requested.  Still, this might be memory at a useful
 532     // address. try_reserve_heap() always keeps this allocated memory, since the
 533     // criteria for a good heap are only checked here.
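         // Concretely, the attempts below are: unscaled (heap ends below 4G, no
         // base and no shift), zerobased (heap ends below 32G with the default
         // 8-byte object alignment, no base), disjoint base (base a multiple of
         // 32G, reserved with a noaccess prefix), and finally an arbitrary
         // address (heap based mode, also with a noaccess prefix).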
 534 
 535     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 536     // Give it several tries from top of range to bottom.
 537     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 538 
 539       // Calculate the address range within which we try to attach (range of possible start addresses).
 540       char* const highest_start = align_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 541       char* const lowest_start  = align_up(aligned_heap_base_min_address, attach_point_alignment);
 542       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 543                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 544     }
 545 
 546     // zerobased: Attempt to allocate in the lower 32G.
 547     // But leave room for the compressed class pointers, which is allocated above
 548     // the heap.
 549     char *zerobased_max = (char *)OopEncodingHeapMax;
 550     const size_t class_space = align_up(CompressedClassSpaceSize, alignment);
 551     // For small heaps, save some space for compressed class pointer
 552     // space so it can be decoded with no base.
 553     if (UseCompressedClassPointers && !UseSharedSpaces &&
 554         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 555         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 556       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 557     }
 558 
 559     // Give it several tries from top of range to bottom.
 560     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 561         ((_base == NULL) ||                        // No previous try succeeded.
 562          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 563 
 564       // Calculate the address range within which we try to attach (range of possible start addresses).
 565       char *const highest_start = align_down(zerobased_max - size, attach_point_alignment);
 566       // Need to be careful about size being guaranteed to be less
 567       // than UnscaledOopHeapMax due to type constraints.
 568       char *lowest_start = aligned_heap_base_min_address;
 569       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 570       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 571         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 572       }
 573       lowest_start = align_up(lowest_start, attach_point_alignment);
 574       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 575                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 576     }
 577 
 578     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 579     // implement null checks.
 580     noaccess_prefix = noaccess_prefix_size(alignment);
 581 
 582     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 583     char** addresses = get_attach_addresses_for_disjoint_mode();
 584     int i = 0;
 585     while (addresses[i] &&                                 // End of array not yet reached.
 586            ((_base == NULL) ||                             // No previous try succeeded.
 587             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 588              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 589       char* const attach_point = addresses[i];
 590       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 591       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 592       i++;
 593     }
 594 
 595     // Last, desperate try without any placement.
 596     if (_base == NULL) {
 597       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 598       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 599     }
 600   }
 601 }
 602 
 603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* heap_allocation_directory) : ReservedSpace() {
 604 
 605   if (size == 0) {
 606     return;
 607   }
 608 
 609   if (heap_allocation_directory != NULL) {
 610     _fd_for_heap = os::create_file_for_heap(heap_allocation_directory);
 611     if (_fd_for_heap == -1) {
 612       vm_exit_during_initialization(
 613         err_msg("Could not create file for Heap at location %s", heap_allocation_directory));
 614     }
 615   }
 616 
 617   // Heap size should be aligned to alignment, too.
 618   guarantee(is_aligned(size, alignment), "set by caller");
 619 
 620   if (UseCompressedOops) {
 621     initialize_compressed_heap(size, alignment, large);
 622     if (_size > size) {
 623       // We allocated the heap with a noaccess prefix.
 624       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix
 625       // if we had to try at an arbitrary address.
 626       establish_noaccess_prefix();
 627     }
 628   } else {
 629     initialize(size, alignment, large, NULL, false);
 630   }
 631 
 632   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 633          "area must be distinguishable from marks for mark-sweep");
 634   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 635          "area must be distinguishable from marks for mark-sweep");
 636 
 637   if (base() != NULL) {
 638     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 639   }
 640 
 641   if (_fd_for_heap != -1) {
 642     os::close(_fd_for_heap);
 643   }
 644 }
 645 
 646 // Reserve space for the code segment.  Same as the Java heap, except we mark
 647 // this as executable.
 648 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 649                                      size_t rs_align,
 650                                      bool large) :
 651   ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
 652   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 653 }
 654 
 655 // VirtualSpace
 656 
 657 VirtualSpace::VirtualSpace() {
 658   _low_boundary           = NULL;
 659   _high_boundary          = NULL;
 660   _low                    = NULL;
 661   _high                   = NULL;
 662   _lower_high             = NULL;
 663   _middle_high            = NULL;
 664   _upper_high             = NULL;
 665   _lower_high_boundary    = NULL;
 666   _middle_high_boundary   = NULL;
 667   _upper_high_boundary    = NULL;
 668   _lower_alignment        = 0;
 669   _middle_alignment       = 0;
 670   _upper_alignment        = 0;
 671   _special                = false;
 672   _executable             = false;
 673 }
 674 
 675 
 676 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 677   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 678   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 679 }
 680 
 681 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 682   if (!rs.is_reserved()) return false;  // Allocation failed.
 683   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 684   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 685 
 686   _low_boundary  = rs.base();
 687   _high_boundary = low_boundary() + rs.size();
 688 
 689   _low = low_boundary();
 690   _high = low();
 691 
 692   _special = rs.special();
 693   _executable = rs.executable();
 694 
 695   // When a VirtualSpace begins life at a large size, make all future expansion
 696   // and shrinking occur aligned to a granularity of large pages.  This avoids
 697   // fragmentation of physical addresses that inhibits the use of large pages
 698   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 699   // page size, the only spaces that get handled this way are codecache and
 700   // the heap itself, both of which provide a substantial performance
 701   // boost in many benchmarks when covered by large pages.
 702   //
 703   // No attempt is made to force large page alignment at the very top and
 704   // bottom of the space if they are not aligned so already.
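       // As an illustration (assuming a 4K small page and a 2M commit
       // granularity): for a space spanning [0x1000, 0x601000) the lower region
       // is [0x1000, 0x200000), the middle region is the granularity-aligned
       // [0x200000, 0x600000), and the upper region is the trailing
       // [0x600000, 0x601000); lower and upper are committed with small pages,
       // the middle with the larger commit granularity.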
 705   _lower_alignment  = os::vm_page_size();
 706   _middle_alignment = max_commit_granularity;
 707   _upper_alignment  = os::vm_page_size();
 708 
 709   // End of each region
 710   _lower_high_boundary = align_up(low_boundary(), middle_alignment());
 711   _middle_high_boundary = align_down(high_boundary(), middle_alignment());
 712   _upper_high_boundary = high_boundary();
 713 
 714   // High address of each region
 715   _lower_high = low_boundary();
 716   _middle_high = lower_high_boundary();
 717   _upper_high = middle_high_boundary();
 718 
 719   // commit to initial size
 720   if (committed_size > 0) {
 721     if (!expand_by(committed_size)) {
 722       return false;
 723     }
 724   }
 725   return true;
 726 }
 727 
 728 
 729 VirtualSpace::~VirtualSpace() {
 730   release();
 731 }
 732 
 733 
 734 void VirtualSpace::release() {
 735   // This does not release the underlying reserved memory.
 736   // The caller must release it via rs.release().
 737   _low_boundary           = NULL;
 738   _high_boundary          = NULL;
 739   _low                    = NULL;
 740   _high                   = NULL;
 741   _lower_high             = NULL;
 742   _middle_high            = NULL;
 743   _upper_high             = NULL;
 744   _lower_high_boundary    = NULL;
 745   _middle_high_boundary   = NULL;
 746   _upper_high_boundary    = NULL;
 747   _lower_alignment        = 0;
 748   _middle_alignment       = 0;
 749   _upper_alignment        = 0;
 750   _special                = false;
 751   _executable             = false;
 752 }
 753 
 754 
 755 size_t VirtualSpace::committed_size() const {
 756   return pointer_delta(high(), low(), sizeof(char));
 757 }
 758 
 759 
 760 size_t VirtualSpace::reserved_size() const {
 761   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 762 }
 763 
 764 
 765 size_t VirtualSpace::uncommitted_size()  const {
 766   return reserved_size() - committed_size();
 767 }
 768 
 769 size_t VirtualSpace::actual_committed_size() const {
 770   // Special VirtualSpaces commit all reserved space up front.
 771   if (special()) {
 772     return reserved_size();
 773   }
 774 
 775   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 776   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 777   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 778 
 779 #ifdef ASSERT
 780   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 781   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 782   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 783 
 784   if (committed_high > 0) {
 785     assert(committed_low == lower, "Must be");
 786     assert(committed_middle == middle, "Must be");
 787   }
 788 
 789   if (committed_middle > 0) {
 790     assert(committed_low == lower, "Must be");
 791   }
 792   if (committed_middle < middle) {
 793     assert(committed_high == 0, "Must be");
 794   }
 795 
 796   if (committed_low < lower) {
 797     assert(committed_high == 0, "Must be");
 798     assert(committed_middle == 0, "Must be");
 799   }
 800 #endif
 801 
 802   return committed_low + committed_middle + committed_high;
 803 }
 804 
 805 
 806 bool VirtualSpace::contains(const void* p) const {
 807   return low() <= (const char*) p && (const char*) p < high();
 808 }
 809 
 810 static void pretouch_expanded_memory(void* start, void* end) {
 811   assert(is_aligned(start, os::vm_page_size()), "Unexpected alignment");
 812   assert(is_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 813 
 814   os::pretouch_memory(start, end);
 815 }
 816 
 817 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 818   if (os::commit_memory(start, size, alignment, executable)) {
 819     if (pre_touch || AlwaysPreTouch) {
 820       pretouch_expanded_memory(start, start + size);
 821     }
 822     return true;
 823   }
 824 
 825   debug_only(warning(
 826       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 827       " size=" SIZE_FORMAT ", executable=%d) failed",
 828       p2i(start), p2i(start + size), size, executable);)
 829 
 830   return false;
 831 }
 832 
 833 /*
 834    First we need to determine if a particular virtual space is using large
 835    pages.  This is done in the initialize function and only virtual spaces
 836    that are larger than LargePageSizeInBytes use large pages.  Once we
 837    have determined this, all expand_by and shrink_by calls must grow and
 838    shrink by large page size chunks.  If a particular request
 839    is within the current large page, the call to commit and uncommit memory
 840    can be ignored.  In the case that the low and high boundaries of this
 841    space are not large page aligned, the pages leading to the first large
 842    page address and the pages after the last large page address must be
 843    allocated with default pages.
 844 */
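     // A minimal usage sketch (illustrative; it mirrors the unit tests at the
     // end of this file):
     //
     //   ReservedSpace rs(10 * M);
     //   VirtualSpace vs;
     //   if (vs.initialize(rs, 0)) {
     //     vs.expand_by(2 * M, /* pre_touch */ false);  // commits 2M
     //     vs.shrink_by(1 * M);                         // uncommits 1M
     //   }
     //   rs.release();  // VirtualSpace::release() does not free the reservation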
 845 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 846   if (uncommitted_size() < bytes) {
 847     return false;
 848   }
 849 
 850   if (special()) {
 851     // don't commit memory if the entire space is pinned in memory
 852     _high += bytes;
 853     return true;
 854   }
 855 
 856   char* previous_high = high();
 857   char* unaligned_new_high = high() + bytes;
 858   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 859 
 860   // Calculate where the new high for each of the regions should be.  If
 861   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 862   // then the unaligned lower and upper new highs would be the
 863   // lower_high() and upper_high() respectively.
 864   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 865   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 866   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 867 
 868   // Align the new highs based on the regions' alignment.  Lower and upper
 869   // alignment will always be the default page size.  Middle alignment will be
 870   // LargePageSizeInBytes if the actual size of the virtual space is in
 871   // fact larger than LargePageSizeInBytes.
 872   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 873   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 874   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 875 
 876   // Determine which regions need to grow in this expand_by call.
 877   // If you are growing in the lower region, high() must be in that
 878   // region so calculate the size based on high().  For the middle and
 879   // upper regions, determine the starting point of growth based on the
 880   // location of high().  By getting the MAX of the region's low address
 881   // (or the previous region's high address) and high(), we can tell if it
 882   // is an intra or inter region growth.
 883   size_t lower_needs = 0;
 884   if (aligned_lower_new_high > lower_high()) {
 885     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 886   }
 887   size_t middle_needs = 0;
 888   if (aligned_middle_new_high > middle_high()) {
 889     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 890   }
 891   size_t upper_needs = 0;
 892   if (aligned_upper_new_high > upper_high()) {
 893     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 894   }
 895 
 896   // Check contiguity.
 897   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 898          "high address must be contained within the region");
 899   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 900          "high address must be contained within the region");
 901   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 902          "high address must be contained within the region");
 903 
 904   // Commit regions
 905   if (lower_needs > 0) {
 906     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 907     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 908       return false;
 909     }
 910     _lower_high += lower_needs;
 911   }
 912 
 913   if (middle_needs > 0) {
 914     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 915     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 916       return false;
 917     }
 918     _middle_high += middle_needs;
 919   }
 920 
 921   if (upper_needs > 0) {
 922     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 923     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 924       return false;
 925     }
 926     _upper_high += upper_needs;
 927   }
 928 
 929   _high += bytes;
 930   return true;
 931 }
 932 
 933 // A page is uncommitted if the contents of the entire page are deemed unusable.
 934 // Continue to decrement the high() pointer until it reaches a page boundary,
 935 // at which point that particular page can be uncommitted.
 936 void VirtualSpace::shrink_by(size_t size) {
 937   if (committed_size() < size)
 938     fatal("Cannot shrink virtual space to negative size");
 939 
 940   if (special()) {
 941     // don't uncommit if the entire space is pinned in memory
 942     _high -= size;
 943     return;
 944   }
 945 
 946   char* unaligned_new_high = high() - size;
 947   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 948 
 949   // Calculate the new unaligned addresses
 950   char* unaligned_upper_new_high =
 951     MAX2(unaligned_new_high, middle_high_boundary());
 952   char* unaligned_middle_new_high =
 953     MAX2(unaligned_new_high, lower_high_boundary());
 954   char* unaligned_lower_new_high =
 955     MAX2(unaligned_new_high, low_boundary());
 956 
 957   // Align address to region's alignment
 958   char* aligned_upper_new_high =  align_up(unaligned_upper_new_high, upper_alignment());
 959   char* aligned_middle_new_high = align_up(unaligned_middle_new_high, middle_alignment());
 960   char* aligned_lower_new_high =  align_up(unaligned_lower_new_high, lower_alignment());
 961 
 962   // Determine which regions need to shrink
 963   size_t upper_needs = 0;
 964   if (aligned_upper_new_high < upper_high()) {
 965     upper_needs =
 966       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 967   }
 968   size_t middle_needs = 0;
 969   if (aligned_middle_new_high < middle_high()) {
 970     middle_needs =
 971       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 972   }
 973   size_t lower_needs = 0;
 974   if (aligned_lower_new_high < lower_high()) {
 975     lower_needs =
 976       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 977   }
 978 
 979   // Check contiguity.
 980   assert(middle_high_boundary() <= upper_high() &&
 981          upper_high() <= upper_high_boundary(),
 982          "high address must be contained within the region");
 983   assert(lower_high_boundary() <= middle_high() &&
 984          middle_high() <= middle_high_boundary(),
 985          "high address must be contained within the region");
 986   assert(low_boundary() <= lower_high() &&
 987          lower_high() <= lower_high_boundary(),
 988          "high address must be contained within the region");
 989 
 990   // Uncommit
 991   if (upper_needs > 0) {
 992     assert(middle_high_boundary() <= aligned_upper_new_high &&
 993            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 994            "must not shrink beyond region");
 995     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 996       debug_only(warning("os::uncommit_memory failed"));
 997       return;
 998     } else {
 999       _upper_high -= upper_needs;
1000     }
1001   }
1002   if (middle_needs > 0) {
1003     assert(lower_high_boundary() <= aligned_middle_new_high &&
1004            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1005            "must not shrink beyond region");
1006     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1007       debug_only(warning("os::uncommit_memory failed"));
1008       return;
1009     } else {
1010       _middle_high -= middle_needs;
1011     }
1012   }
1013   if (lower_needs > 0) {
1014     assert(low_boundary() <= aligned_lower_new_high &&
1015            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1016            "must not shrink beyond region");
1017     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1018       debug_only(warning("os::uncommit_memory failed"));
1019       return;
1020     } else {
1021       _lower_high -= lower_needs;
1022     }
1023   }
1024 
1025   _high -= size;
1026 }
1027 
1028 #ifndef PRODUCT
1029 void VirtualSpace::check_for_contiguity() {
1030   // Check contiguity.
1031   assert(low_boundary() <= lower_high() &&
1032          lower_high() <= lower_high_boundary(),
1033          "high address must be contained within the region");
1034   assert(lower_high_boundary() <= middle_high() &&
1035          middle_high() <= middle_high_boundary(),
1036          "high address must be contained within the region");
1037   assert(middle_high_boundary() <= upper_high() &&
1038          upper_high() <= upper_high_boundary(),
1039          "high address must be contained within the region");
1040   assert(low() >= low_boundary(), "low");
1041   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1042   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1043   assert(high() <= upper_high(), "upper high");
1044 }
1045 
1046 void VirtualSpace::print_on(outputStream* out) {
1047   out->print   ("Virtual space:");
1048   if (special()) out->print(" (pinned in memory)");
1049   out->cr();
1050   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1051   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1052   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1053   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1054 }
1055 
1056 void VirtualSpace::print() {
1057   print_on(tty);
1058 }
1059 
1060 /////////////// Unit tests ///////////////
1061 
1062 #ifndef PRODUCT
1063 
1064 #define test_log(...) \
1065   do {\
1066     if (VerboseInternalVMTests) { \
1067       tty->print_cr(__VA_ARGS__); \
1068       tty->flush(); \
1069     }\
1070   } while (false)
1071 
1072 class TestReservedSpace : AllStatic {
1073  public:
1074   static void small_page_write(void* addr, size_t size) {
1075     size_t page_size = os::vm_page_size();
1076 
1077     char* end = (char*)addr + size;
1078     for (char* p = (char*)addr; p < end; p += page_size) {
1079       *p = 1;
1080     }
1081   }
1082 
1083   static void release_memory_for_test(ReservedSpace rs) {
1084     if (rs.special()) {
1085       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1086     } else {
1087       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1088     }
1089   }
1090 
1091   static void test_reserved_space1(size_t size, size_t alignment) {
1092     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1093 
1094     assert(is_aligned(size, alignment), "Incorrect input parameters");
1095 
1096     ReservedSpace rs(size,          // size
1097                      alignment,     // alignment
1098                      UseLargePages, // large
1099                      (char *)NULL); // requested_address
1100 
1101     test_log(" rs.special() == %d", rs.special());
1102 
1103     assert(rs.base() != NULL, "Must be");
1104     assert(rs.size() == size, "Must be");
1105 
1106     assert(is_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1107     assert(is_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1108 
1109     if (rs.special()) {
1110       small_page_write(rs.base(), size);
1111     }
1112 
1113     release_memory_for_test(rs);
1114   }
1115 
1116   static void test_reserved_space2(size_t size) {
1117     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1118 
1119     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1120 
1121     ReservedSpace rs(size);
1122 
1123     test_log(" rs.special() == %d", rs.special());
1124 
1125     assert(rs.base() != NULL, "Must be");
1126     assert(rs.size() == size, "Must be");
1127 
1128     if (rs.special()) {
1129       small_page_write(rs.base(), size);
1130     }
1131 
1132     release_memory_for_test(rs);
1133   }
1134 
1135   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1136     test_log("test_reserved_space3(%p, %p, %d)",
1137         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1138 
1139     if (size < alignment) {
1140       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1141       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1142       return;
1143     }
1144 
1145     assert(is_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1146     assert(is_aligned(size, alignment), "Must be at least aligned against alignment");
1147 
1148     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1149 
1150     ReservedSpace rs(size, alignment, large, false);
1151 
1152     test_log(" rs.special() == %d", rs.special());
1153 
1154     assert(rs.base() != NULL, "Must be");
1155     assert(rs.size() == size, "Must be");
1156 
1157     if (rs.special()) {
1158       small_page_write(rs.base(), size);
1159     }
1160 
1161     release_memory_for_test(rs);
1162   }
1163 
1164 
1165   static void test_reserved_space1() {
1166     size_t size = 2 * 1024 * 1024;
1167     size_t ag   = os::vm_allocation_granularity();
1168 
1169     test_reserved_space1(size,      ag);
1170     test_reserved_space1(size * 2,  ag);
1171     test_reserved_space1(size * 10, ag);
1172   }
1173 
1174   static void test_reserved_space2() {
1175     size_t size = 2 * 1024 * 1024;
1176     size_t ag = os::vm_allocation_granularity();
1177 
1178     test_reserved_space2(size * 1);
1179     test_reserved_space2(size * 2);
1180     test_reserved_space2(size * 10);
1181     test_reserved_space2(ag);
1182     test_reserved_space2(size - ag);
1183     test_reserved_space2(size);
1184     test_reserved_space2(size + ag);
1185     test_reserved_space2(size * 2);
1186     test_reserved_space2(size * 2 - ag);
1187     test_reserved_space2(size * 2 + ag);
1188     test_reserved_space2(size * 3);
1189     test_reserved_space2(size * 3 - ag);
1190     test_reserved_space2(size * 3 + ag);
1191     test_reserved_space2(size * 10);
1192     test_reserved_space2(size * 10 + size / 2);
1193   }
1194 
1195   static void test_reserved_space3() {
1196     size_t ag = os::vm_allocation_granularity();
1197 
1198     test_reserved_space3(ag,      ag    , false);
1199     test_reserved_space3(ag * 2,  ag    , false);
1200     test_reserved_space3(ag * 3,  ag    , false);
1201     test_reserved_space3(ag * 2,  ag * 2, false);
1202     test_reserved_space3(ag * 4,  ag * 2, false);
1203     test_reserved_space3(ag * 8,  ag * 2, false);
1204     test_reserved_space3(ag * 4,  ag * 4, false);
1205     test_reserved_space3(ag * 8,  ag * 4, false);
1206     test_reserved_space3(ag * 16, ag * 4, false);
1207 
1208     if (UseLargePages) {
1209       size_t lp = os::large_page_size();
1210 
1211       // Without large pages
1212       test_reserved_space3(lp,     ag * 4, false);
1213       test_reserved_space3(lp * 2, ag * 4, false);
1214       test_reserved_space3(lp * 4, ag * 4, false);
1215       test_reserved_space3(lp,     lp    , false);
1216       test_reserved_space3(lp * 2, lp    , false);
1217       test_reserved_space3(lp * 3, lp    , false);
1218       test_reserved_space3(lp * 2, lp * 2, false);
1219       test_reserved_space3(lp * 4, lp * 2, false);
1220       test_reserved_space3(lp * 8, lp * 2, false);
1221 
1222       // With large pages
1223       test_reserved_space3(lp, ag * 4    , true);
1224       test_reserved_space3(lp * 2, ag * 4, true);
1225       test_reserved_space3(lp * 4, ag * 4, true);
1226       test_reserved_space3(lp, lp        , true);
1227       test_reserved_space3(lp * 2, lp    , true);
1228       test_reserved_space3(lp * 3, lp    , true);
1229       test_reserved_space3(lp * 2, lp * 2, true);
1230       test_reserved_space3(lp * 4, lp * 2, true);
1231       test_reserved_space3(lp * 8, lp * 2, true);
1232     }
1233   }
1234 
1235   static void test_reserved_space() {
1236     test_reserved_space1();
1237     test_reserved_space2();
1238     test_reserved_space3();
1239   }
1240 };
1241 
1242 void TestReservedSpace_test() {
1243   TestReservedSpace::test_reserved_space();
1244 }
1245 
1246 #define assert_equals(actual, expected)  \
1247   assert(actual == expected,             \
1248          "Got " SIZE_FORMAT " expected " \
1249          SIZE_FORMAT, actual, expected);
1250 
1251 #define assert_ge(value1, value2)                  \
1252   assert(value1 >= value2,                         \
1253          "'" #value1 "': " SIZE_FORMAT " '"        \
1254          #value2 "': " SIZE_FORMAT, value1, value2);
1255 
1256 #define assert_lt(value1, value2)                  \
1257   assert(value1 < value2,                          \
1258          "'" #value1 "': " SIZE_FORMAT " '"        \
1259          #value2 "': " SIZE_FORMAT, value1, value2);
1260 
1261 
1262 class TestVirtualSpace : AllStatic {
1263   enum TestLargePages {
1264     Default,
1265     Disable,
1266     Reserve,
1267     Commit
1268   };
1269 
1270   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1271     switch (mode) {
1272     default:
1273     case Default:
1274     case Reserve:
1275       return ReservedSpace(reserve_size_aligned);
1276     case Disable:
1277     case Commit:
1278       return ReservedSpace(reserve_size_aligned,
1279                            os::vm_allocation_granularity(),
1280                            /* large */ false, /* exec */ false);
1281     }
1282   }
1283 
1284   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1285     switch (mode) {
1286     default:
1287     case Default:
1288     case Reserve:
1289       return vs.initialize(rs, 0);
1290     case Disable:
1291       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1292     case Commit:
1293       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1294     }
1295   }
1296 
1297  public:
1298   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1299                                                         TestLargePages mode = Default) {
1300     size_t granularity = os::vm_allocation_granularity();
1301     size_t reserve_size_aligned = align_up(reserve_size, granularity);
1302 
1303     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1304 
1305     assert(reserved.is_reserved(), "Must be");
1306 
1307     VirtualSpace vs;
1308     bool initialized = initialize_virtual_space(vs, reserved, mode);
1309     assert(initialized, "Failed to initialize VirtualSpace");
1310 
1311     vs.expand_by(commit_size, false);
1312 
1313     if (vs.special()) {
1314       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1315     } else {
1316       assert_ge(vs.actual_committed_size(), commit_size);
1317       // Approximate the commit granularity.
1318       // Make sure that we don't commit using large pages
1319       // if large pages have been disabled for this VirtualSpace.
1320       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1321                                    os::vm_page_size() : os::large_page_size();
1322       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1323     }
1324 
1325     reserved.release();
1326   }
1327 
1328   static void test_virtual_space_actual_committed_space_one_large_page() {
1329     if (!UseLargePages) {
1330       return;
1331     }
1332 
1333     size_t large_page_size = os::large_page_size();
1334 
1335     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1336 
1337     assert(reserved.is_reserved(), "Must be");
1338 
1339     VirtualSpace vs;
1340     bool initialized = vs.initialize(reserved, 0);
1341     assert(initialized, "Failed to initialize VirtualSpace");
1342 
1343     vs.expand_by(large_page_size, false);
1344 
1345     assert_equals(vs.actual_committed_size(), large_page_size);
1346 
1347     reserved.release();
1348   }
1349 
1350   static void test_virtual_space_actual_committed_space() {
1351     test_virtual_space_actual_committed_space(4 * K, 0);
1352     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1353     test_virtual_space_actual_committed_space(8 * K, 0);
1354     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1355     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1356     test_virtual_space_actual_committed_space(12 * K, 0);
1357     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1358     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1359     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1360     test_virtual_space_actual_committed_space(64 * K, 0);
1361     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1362     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1363     test_virtual_space_actual_committed_space(2 * M, 0);
1364     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1365     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1366     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1367     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1368     test_virtual_space_actual_committed_space(10 * M, 0);
1369     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1370     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1371     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1372     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1373     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1374     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1375   }
1376 
1377   static void test_virtual_space_disable_large_pages() {
1378     if (!UseLargePages) {
1379       return;
1380     }
1381     // These test cases verify that committing is not done with large pages if we force VirtualSpace to disable large pages.
1382     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1383     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1384     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1385     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1386     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1387     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1388     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1389 
1390     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1391     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1392     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1393     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1394     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1395     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1396     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1397 
1398     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1399     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1400     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1401     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1402     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1403     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1404     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1405   }
1406 
1407   static void test_virtual_space() {
1408     test_virtual_space_actual_committed_space();
1409     test_virtual_space_actual_committed_space_one_large_page();
1410     test_virtual_space_disable_large_pages();
1411   }
1412 };
1413 
1414 void TestVirtualSpace_test() {
1415   TestVirtualSpace::test_virtual_space();
1416 }
1417 
1418 #endif // PRODUCT
1419 
1420 #endif