1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false), _backing_fd(-1) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) : _backing_fd(-1) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) : _backing_fd(-1) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) : _backing_fd(-1) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper method.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special, bool is_file_mapped = false)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
  80     // A different reserve address may be acceptable in other cases
  81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (is_file_mapped) {
  91         if (!os::unmap_memory(base, size)) {
  92           fatal("os::unmap_memory failed");
  93         }
  94       } else {
  95         if (!os::release_memory(base, size)) {
  96           fatal("os::release_memory failed");
  97         }
  98       }
  99     }
 100   }
 101   return true;
 102 }
 103 
 104 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 105                                char* requested_address,
 106                                bool executable) {
 107   const size_t granularity = os::vm_allocation_granularity();
 108   assert((size & (granularity - 1)) == 0,
 109          "size not aligned to os::vm_allocation_granularity()");
 110   assert((alignment & (granularity - 1)) == 0,
 111          "alignment not aligned to os::vm_allocation_granularity()");
 112   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 113          "not a power of 2");
 114 
 115   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 116 
 117   _base = NULL;
 118   _size = 0;
 119   _special = false;
 120   _executable = executable;
 121   _alignment = 0;
 122   _noaccess_prefix = 0;
 123   if (size == 0) {
 124     return;
 125   }
 126 
 127   // If OS doesn't support demand paging for large page memory, we need
 128   // to use reserve_memory_special() to reserve and pin the entire region.
 129   // If there is a backing file directory for this VirtualSpace then whether
 130   // large pages are allocated is up to the filesystem the directory resides in,
 131   // so we ignore the UseLargePages flag in this case.
 132   bool special = large && !os::can_commit_large_page_memory();
 133   if (special && _backing_fd != -1) {
 134     special = false;
 135     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 136                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 137       log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option.");
 138     }
 139   }
 140   char* base = NULL;
 141 
 142   if (special) {
 143 
 144     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 145 
 146     if (base != NULL) {
 147       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 148         // OS ignored requested address. Try different address.
 149         return;
 150       }
 151       // Check alignment constraints.
 152       assert((uintptr_t) base % alignment == 0,
 153              "Large pages returned a non-aligned address, base: "
 154              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 155              p2i(base), alignment);
 156       _special = true;
 157     } else {
 158       // failed; try to reserve regular memory below
 159       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 160                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 161         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 162       }
 163     }
 164   }
 165 
 166   if (base == NULL) {
 167     // Optimistically assume that the OS returns an aligned base pointer.
 168     // When reserving a large address range, most OSes seem to align to at
 169     // least 64K.
 170 
 171     // If the memory was requested at a particular address, use
 172     // os::attempt_reserve_memory_at() to avoid mapping over something
 173     // important.  If the space is not available at that address, NULL is returned.
 174 
 175     if (requested_address != 0) {
 176       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
 177       if (failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) {
 178         // OS ignored requested address. Try different address.
 179         base = NULL;
 180       }
 181     } else {
 182       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
 183     }
 184 
 185     if (base == NULL) return;
 186 
 187     // Check alignment constraints
 188     if ((((size_t)base) & (alignment - 1)) != 0) {
 189       // Base not aligned, retry
 190       if (_backing_fd != -1) {
 191         // unmap_memory does extra work, especially on Windows.
 192         if (!os::unmap_memory(base, size)) fatal("os::unmap_memory failed");
 193       } else {
 194         if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 195       }
 196       // Make sure that size is aligned
 197       size = align_size_up(size, alignment);
 198       base = os::reserve_memory_aligned(size, alignment, _backing_fd);
 199 
 200       if (requested_address != 0 &&
 201           failed_to_reserve_as_requested(base, requested_address, size, false, _backing_fd != -1)) {
 202         // As a result of the alignment constraints, the allocated base differs
 203         // from the requested address. Return back to the caller who can
 204         // take remedial action (like try again without a requested address).
 205         assert(_base == NULL, "should be");
 206         return;
 207       }
 208     }
 209   }
 210   // Done
 211   _base = base;
 212   _size = size;
 213   _alignment = alignment;
 214   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 215   if (_backing_fd != -1) {
 216     _special = true;
 217   }
 218 }
 219 
 220 
 221 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 222                              bool special, bool executable) {
 223   assert((size % os::vm_allocation_granularity()) == 0,
 224          "size not allocation aligned");
 225   _base = base;
 226   _size = size;
 227   _alignment = alignment;
 228   _noaccess_prefix = 0;
 229   _special = special;
 230   _executable = executable;
 231 }
 232 
 233 
 234 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 235                                         bool split, bool realloc) {
 236   assert(partition_size <= size(), "partition failed");
 237   if (split) {
 238     os::split_reserved_memory(base(), size(), partition_size, realloc);
 239   }
 240   ReservedSpace result(base(), partition_size, alignment, special(),
 241                        executable());
 242   return result;
 243 }
 244 
 245 
 246 ReservedSpace
 247 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 248   assert(partition_size <= size(), "partition failed");
 249   ReservedSpace result(base() + partition_size, size() - partition_size,
 250                        alignment, special(), executable());
 251   return result;
 252 }
 253 
 254 
 255 size_t ReservedSpace::page_align_size_up(size_t size) {
 256   return align_size_up(size, os::vm_page_size());
 257 }
 258 
 259 
 260 size_t ReservedSpace::page_align_size_down(size_t size) {
 261   return align_size_down(size, os::vm_page_size());
 262 }
 263 
 264 
 265 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 266   return align_size_up(size, os::vm_allocation_granularity());
 267 }
 268 
 269 
 270 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 271   return align_size_down(size, os::vm_allocation_granularity());
 272 }
 273 
 274 
 275 void ReservedSpace::release() {
 276   if (is_reserved()) {
 277     char *real_base = _base - _noaccess_prefix;
 278     const size_t real_size = _size + _noaccess_prefix;
 279     if (special()) {
 280       if (_backing_fd != -1) {
 281         os::unmap_memory(real_base, real_size);
 282       }
 283       else {
 284         os::release_memory_special(real_base, real_size);
 285       }
 286     } else {
 287       os::release_memory(real_base, real_size);
 288     }
 289     _base = NULL;
 290     _size = 0;
 291     _noaccess_prefix = 0;
 292     _alignment = 0;
 293     _special = false;
 294     _executable = false;
 295   }
 296 }
 297 
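     // The noaccess prefix must be a multiple of the page size (so it can be
     // protected page-wise) and of the heap alignment (so that adding it to the
     // base in establish_noaccess_prefix() keeps the base aligned), hence the
     // least common multiple below.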
 298 static size_t noaccess_prefix_size(size_t alignment) {
 299   return lcm(os::vm_page_size(), alignment);
 300 }
 301 
 302 void ReservedHeapSpace::establish_noaccess_prefix() {
 303   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 304   _noaccess_prefix = noaccess_prefix_size(_alignment);
 305 
 306   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 307     if (true
 308         WIN64_ONLY(&& !UseLargePages)
 309         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
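           // The guards above exclude configurations where (presumably) a
           // page-sized protection cannot be established; in those cases
           // implicit null checks are disabled below instead.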
 310       // Protect memory at the base of the allocated region.
 311       // If special, the page was committed (only matters on windows)
 312       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 313         fatal("cannot protect protection page");
 314       }
 315       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 316                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 317                                  p2i(_base),
 318                                  _noaccess_prefix);
 319       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 320     } else {
 321       Universe::set_narrow_oop_use_implicit_null_checks(false);
 322     }
 323   }
 324 
 325   _base += _noaccess_prefix;
 326   _size -= _noaccess_prefix;
 327   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 328 }
 329 
 330 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 331 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 332 // might still fulfill the wishes of the caller.
 333 // Assures the memory is aligned to 'alignment'.
 334 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 335 void ReservedHeapSpace::try_reserve_heap(size_t size,
 336                                          size_t alignment,
 337                                          bool large,
 338                                          char* requested_address) {
 339   if (_base != NULL) {
 340     // We tried before, but we didn't like the address delivered.
 341     release();
 342   }
 343 
 344   // If OS doesn't support demand paging for large page memory, we need
 345   // to use reserve_memory_special() to reserve and pin the entire region.
 346   // If there is a backing file directory for this VirtualSpace then whether
 347   // large pages are allocated is up to the filesystem the directory resides in,
 348   // so we ignore the UseLargePages flag in this case.
 349   bool special = large && !os::can_commit_large_page_memory();
 350   if (special && _backing_fd != -1) {
 351     special = false;
 352     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 353       !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 354       log_debug(gc, heap, coops)("UseLargePages can't be set with HeapDir option.");
 355     }
 356   }
 357   char* base = NULL;
 358 
 359   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 360                              " heap of size " SIZE_FORMAT_HEX,
 361                              p2i(requested_address),
 362                              size);
 363 
 364   if (special) {
 365     base = os::reserve_memory_special(size, alignment, requested_address, false);
 366 
 367     if (base != NULL) {
 368       // Check alignment constraints.
 369       assert((uintptr_t) base % alignment == 0,
 370              "Large pages returned a non-aligned address, base: "
 371              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 372              p2i(base), alignment);
 373       _special = true;
 374     }
 375   }
 376 
 377   if (base == NULL) {
 378     // Failed; try to reserve regular memory below
 379     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 380                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 381       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 382     }
 383 
 384     // Optimistically assume that the OS returns an aligned base pointer.
 385     // When reserving a large address range, most OSes seem to align to at
 386     // least 64K.
 387 
 388     // If the memory was requested at a particular address, use
 389     // os::attempt_reserve_memory_at() to avoid mapping over something
 390     // important.  If the space is not available at that address, NULL is returned.
 391 
 392     if (requested_address != 0) {
 393       base = os::attempt_reserve_memory_at(size, requested_address, _backing_fd);
 394     } else {
 395       base = os::reserve_memory(size, NULL, alignment, _backing_fd);
 396     }
 397   }
 398   if (base == NULL) { return; }
 399 
 400   // Done
 401   _base = base;
 402   _size = size;
 403   _alignment = alignment;
 404   // If the heap is reserved with a backing file, the entire space has been committed, so set the _special flag to true.
 405   if (_backing_fd != -1) {
 406     _special = true;
 407   }
 408 
 409   // Check alignment constraints
 410   if ((((size_t)base) & (alignment - 1)) != 0) {
 411     // Base not aligned; release so the caller can retry.
 412     release();
 413   }
 414 }
 415 
 416 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 417                                           char *lowest_start,
 418                                           size_t attach_point_alignment,
 419                                           char *aligned_heap_base_min_address,
 420                                           char *upper_bound,
 421                                           size_t size,
 422                                           size_t alignment,
 423                                           bool large) {
 424   const size_t attach_range = highest_start - lowest_start;
 425   // Cap num_attempts at the number of possible attach points.
 426   // At least one attempt is possible even for a zero-sized attach range.
 427   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 428   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 429 
 430   const size_t stepsize = (attach_range == 0) ? // Only one try.
 431     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 432 
 433   // Try attach points from top to bottom.
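       // The loop stops as soon as a previous try produced a base inside
       // [aligned_heap_base_min_address, upper_bound - size], or when the next
       // attach point would fall outside [lowest_start, highest_start].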
 434   char* attach_point = highest_start;
 435   while (attach_point >= lowest_start  &&
 436          attach_point <= highest_start &&  // Avoid wrap around.
 437          ((_base == NULL) ||
 438           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 439     try_reserve_heap(size, alignment, large, attach_point);
 440     attach_point -= stepsize;
 441   }
 442 }
 443 
 444 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 445 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 446 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 447 
 448 // Helper for heap allocation. Returns an array with addresses
 449 // (OS-specific) which are suited for disjoint base mode. Array is
 450 // NULL terminated.
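     // The entries are multiples of 32G, which is OopEncodingHeapMax for the
     // default 8-byte object alignment; a heap based at such an address has a
     // base whose low bits are zero and hence disjoint from the shifted narrow
     // oop bits, which is what disjoint base decoding relies on.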
 451 static char** get_attach_addresses_for_disjoint_mode() {
 452   static uint64_t addresses[] = {
 453      2 * SIZE_32G,
 454      3 * SIZE_32G,
 455      4 * SIZE_32G,
 456      8 * SIZE_32G,
 457     10 * SIZE_32G,
 458      1 * SIZE_64K * SIZE_32G,
 459      2 * SIZE_64K * SIZE_32G,
 460      3 * SIZE_64K * SIZE_32G,
 461      4 * SIZE_64K * SIZE_32G,
 462     16 * SIZE_64K * SIZE_32G,
 463     32 * SIZE_64K * SIZE_32G,
 464     34 * SIZE_64K * SIZE_32G,
 465     0
 466   };
 467 
 468   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This assumes
 469   // the array is sorted in ascending order.
 470   uint i = 0;
 471   while (addresses[i] != 0 &&
 472          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 473     i++;
 474   }
 475   uint start = i;
 476 
 477   // Avoid more steps than requested.
 478   i = 0;
 479   while (addresses[start+i] != 0) {
 480     if (i == HeapSearchSteps) {
 481       addresses[start+i] = 0;
 482       break;
 483     }
 484     i++;
 485   }
 486 
 487   return (char**) &addresses[start];
 488 }
 489 
 490 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 491   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 492             "cannot allocate compressed oop heap for this size");
 493   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 494   assert(HeapBaseMinAddress > 0, "sanity");
 495 
 496   const size_t granularity = os::vm_allocation_granularity();
 497   assert((size & (granularity - 1)) == 0,
 498          "size not aligned to os::vm_allocation_granularity()");
 499   assert((alignment & (granularity - 1)) == 0,
 500          "alignment not aligned to os::vm_allocation_granularity()");
 501   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 502          "not a power of 2");
 503 
 504   // The necessary attach point alignment for generated wish addresses.
 505   // This is needed to increase the chance of attaching for mmap and shmat.
 506   const size_t os_attach_point_alignment =
 507     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 508     NOT_AIX(os::vm_allocation_granularity());
 509   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 510 
 511   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 512   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 513     noaccess_prefix_size(alignment) : 0;
 514 
 515   // Attempt to alloc at user-given address.
 516   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 517     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 518     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 519       release();
 520     }
 521   }
 522 
 523   // Keep heap at HeapBaseMinAddress.
 524   if (_base == NULL) {
 525 
 526     // Try to allocate the heap at addresses that allow efficient oop compression.
 527     // Different schemes are tried, in order of decreasing optimization potential.
 528     //
 529     // For this, try_reserve_heap() is called with the desired heap base addresses.
 530     // A call into the os layer to allocate at a given address can return memory
 531     // at a different address than requested.  Still, this might be memory at a useful
 532     // address. try_reserve_heap() always returns this allocated memory, as only here
 533     // the criteria for a good heap are checked.
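         //
         // Roughly, the schemes tried below correspond to the narrow oop modes:
         // unscaled (heap ends below 4G, no base and no shift), zero based (heap
         // ends below OopEncodingHeapMax, shift only), disjoint base (base is a
         // multiple of OopEncodingHeapMax), and finally an arbitrary base.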
 534 
 535     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 536     // Give it several tries from top of range to bottom.
 537     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 538 
 539       // Calculate the address range within which we try to attach (range of possible start addresses).
 540       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 541       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 542       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 543                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 544     }
 545 
 546     // zerobased: Attempt to allocate in the lower 32G.
 547     // But leave room for the compressed class space, which is allocated above
 548     // the heap.
 549     char *zerobased_max = (char *)OopEncodingHeapMax;
 550     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 551     // For small heaps, save some space for compressed class pointer
 552     // space so it can be decoded with no base.
 553     if (UseCompressedClassPointers && !UseSharedSpaces &&
 554         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 555         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 556       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 557     }
 558 
 559     // Give it several tries from top of range to bottom.
 560     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 561         ((_base == NULL) ||                        // No previous try succeeded.
 562          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 563 
 564       // Calculate the address range within which we try to attach (range of possible start addresses).
 565       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 566       // Need to be careful about size being guaranteed to be less
 567       // than UnscaledOopHeapMax due to type constraints.
 568       char *lowest_start = aligned_heap_base_min_address;
 569       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 570       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 571         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 572       }
 573       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 574       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 575                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 576     }
 577 
 578     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 579     // implement null checks.
 580     noaccess_prefix = noaccess_prefix_size(alignment);
 581 
 582     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 583     char** addresses = get_attach_addresses_for_disjoint_mode();
 584     int i = 0;
 585     while (addresses[i] &&                                 // End of array not yet reached.
 586            ((_base == NULL) ||                             // No previous try succeeded.
 587             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 588              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 589       char* const attach_point = addresses[i];
 590       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 591       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 592       i++;
 593     }
 594 
 595     // Last, desperate try without any placement.
 596     if (_base == NULL) {
 597       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 598       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 599     }
 600   }
 601 }
 602 
 603 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backing_fs_for_heap) : ReservedSpace() {
 604 
 605   if (size == 0) {
 606     return;
 607   }
 608 
 609   if (backing_fs_for_heap != NULL) {
 610     _backing_fd = os::create_file_for_heap(backing_fs_for_heap, size);
 611     if (_backing_fd == -1) {
 612       vm_exit_during_initialization(
 613         err_msg("Could not create file for Heap at location %s", backing_fs_for_heap));
 614     }
 615   }
 616 
 617   // Heap size should be aligned to alignment, too.
 618   guarantee(is_size_aligned(size, alignment), "set by caller");
 619 
 620   if (UseCompressedOops) {
 621     initialize_compressed_heap(size, alignment, large);
 622     if (_size > size) {
 623       // We allocated heap with noaccess prefix.
 624       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 625       // if we had to try at arbitrary address.
 626       establish_noaccess_prefix();
 627     }
 628   } else {
 629     initialize(size, alignment, large, NULL, false);
 630   }
 631 
 632   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 633          "area must be distinguishable from marks for mark-sweep");
 634   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 635          "area must be distinguishable from marks for mark-sweep");
 636 
 637   if (base() > 0) {
 638     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 639   }
 640 
 641   if (backing_fs_for_heap != NULL) {
 642     os::close(_backing_fd);
 643   }
 644 }
 645 
 646 // Reserve space for code segment.  Same as Java heap only we mark this as
 647 // executable.
 648 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 649                                      size_t rs_align,
 650                                      bool large) :
 651   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 652   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 653 }
 654 
 655 // VirtualSpace
 656 
 657 VirtualSpace::VirtualSpace() {
 658   _low_boundary           = NULL;
 659   _high_boundary          = NULL;
 660   _low                    = NULL;
 661   _high                   = NULL;
 662   _lower_high             = NULL;
 663   _middle_high            = NULL;
 664   _upper_high             = NULL;
 665   _lower_high_boundary    = NULL;
 666   _middle_high_boundary   = NULL;
 667   _upper_high_boundary    = NULL;
 668   _lower_alignment        = 0;
 669   _middle_alignment       = 0;
 670   _upper_alignment        = 0;
 671   _special                = false;
 672   _executable             = false;
 673 }
 674 
 675 
 676 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
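       // Commit granularity: the largest page size that fits the reserved
       // region, so large enough regions can be committed with large pages.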
 677   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 678   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 679 }
 680 
 681 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 682   if (!rs.is_reserved()) return false;  // allocation failed.
 683   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 684   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 685 
 686   _low_boundary  = rs.base();
 687   _high_boundary = low_boundary() + rs.size();
 688 
 689   _low = low_boundary();
 690   _high = low();
 691 
 692   _special = rs.special();
 693   _executable = rs.executable();
 694 
 695   // When a VirtualSpace begins life at a large size, make all future expansion
 696   // and shrinking occur aligned to a granularity of large pages.  This avoids
 697   // fragmentation of physical addresses that inhibits the use of large pages
 698   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 699   // page size, the only spaces that get handled this way are codecache and
 700   // the heap itself, both of which provide a substantial performance
 701   // boost in many benchmarks when covered by large pages.
 702   //
 703   // No attempt is made to force large page alignment at the very top and
 704   // bottom of the space if they are not aligned so already.
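       //
       // The space is thus split into three regions:
       //   [low_boundary(),         lower_high_boundary())  -- unaligned head, small pages
       //   [lower_high_boundary(),  middle_high_boundary()) -- middle-aligned body, possibly large pages
       //   [middle_high_boundary(), high_boundary())        -- unaligned tail, small pages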
 705   _lower_alignment  = os::vm_page_size();
 706   _middle_alignment = max_commit_granularity;
 707   _upper_alignment  = os::vm_page_size();
 708 
 709   // End of each region
 710   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 711   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 712   _upper_high_boundary = high_boundary();
 713 
 714   // High address of each region
 715   _lower_high = low_boundary();
 716   _middle_high = lower_high_boundary();
 717   _upper_high = middle_high_boundary();
 718 
 719   // commit to initial size
 720   if (committed_size > 0) {
 721     if (!expand_by(committed_size)) {
 722       return false;
 723     }
 724   }
 725   return true;
 726 }
 727 
 728 
 729 VirtualSpace::~VirtualSpace() {
 730   release();
 731 }
 732 
 733 
 734 void VirtualSpace::release() {
 735   // This does not release the underlying reserved memory.
 736   // The caller must release that via rs.release().
 737   _low_boundary           = NULL;
 738   _high_boundary          = NULL;
 739   _low                    = NULL;
 740   _high                   = NULL;
 741   _lower_high             = NULL;
 742   _middle_high            = NULL;
 743   _upper_high             = NULL;
 744   _lower_high_boundary    = NULL;
 745   _middle_high_boundary   = NULL;
 746   _upper_high_boundary    = NULL;
 747   _lower_alignment        = 0;
 748   _middle_alignment       = 0;
 749   _upper_alignment        = 0;
 750   _special                = false;
 751   _executable             = false;
 752 }
 753 
 754 
 755 size_t VirtualSpace::committed_size() const {
 756   return pointer_delta(high(), low(), sizeof(char));
 757 }
 758 
 759 
 760 size_t VirtualSpace::reserved_size() const {
 761   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 762 }
 763 
 764 
 765 size_t VirtualSpace::uncommitted_size()  const {
 766   return reserved_size() - committed_size();
 767 }
 768 
 769 size_t VirtualSpace::actual_committed_size() const {
 770   // Special VirtualSpaces commit all reserved space up front.
 771   if (special()) {
 772     return reserved_size();
 773   }
 774 
 775   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 776   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 777   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 778 
 779 #ifdef ASSERT
 780   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 781   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 782   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 783 
 784   if (committed_high > 0) {
 785     assert(committed_low == lower, "Must be");
 786     assert(committed_middle == middle, "Must be");
 787   }
 788 
 789   if (committed_middle > 0) {
 790     assert(committed_low == lower, "Must be");
 791   }
 792   if (committed_middle < middle) {
 793     assert(committed_high == 0, "Must be");
 794   }
 795 
 796   if (committed_low < lower) {
 797     assert(committed_high == 0, "Must be");
 798     assert(committed_middle == 0, "Must be");
 799   }
 800 #endif
 801 
 802   return committed_low + committed_middle + committed_high;
 803 }
 804 
 805 
 806 bool VirtualSpace::contains(const void* p) const {
 807   return low() <= (const char*) p && (const char*) p < high();
 808 }
 809 
 810 static void pretouch_expanded_memory(void* start, void* end) {
 811   assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
 812   assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 813 
 814   os::pretouch_memory(start, end);
 815 }
 816 
 817 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 818   if (os::commit_memory(start, size, alignment, executable)) {
 819     if (pre_touch || AlwaysPreTouch) {
 820       pretouch_expanded_memory(start, start + size);
 821     }
 822     return true;
 823   }
 824 
 825   debug_only(warning(
 826       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 827       " size=" SIZE_FORMAT ", executable=%d) failed",
 828       p2i(start), p2i(start + size), size, executable);)
 829 
 830   return false;
 831 }
 832 
 833 /*
 834    First we need to determine if a particular virtual space is using large
 835    pages.  This is done in the initialize function and only virtual spaces
 836    that are larger than LargePageSizeInBytes use large pages.  Once we
 837    have determined this, all expand_by and shrink_by calls must grow and
 838    shrink by large page size chunks.  If a particular request
 839    is within the current large page, the call to commit and uncommit memory
 840    can be ignored.  In the case that the low and high boundaries of this
 841    space are not large page aligned, the pages leading to the first large
 842    page address and the pages after the last large page address must be
 843    allocated with default pages.
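
        Illustrative example (assuming 4K small pages and a 2M middle alignment):
        the bytes from the base up to the first 2M boundary form the lower region
        and are committed with 4K pages; the 2M-aligned body forms the middle
        region and is committed in 2M steps; any tail past the last 2M boundary
        forms the upper region and again uses 4K pages.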
 844 */
 845 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 846   if (uncommitted_size() < bytes) {
 847     return false;
 848   }
 849 
 850   if (special()) {
 851     // don't commit memory if the entire space is pinned in memory
 852     _high += bytes;
 853     return true;
 854   }
 855 
 856   char* previous_high = high();
 857   char* unaligned_new_high = high() + bytes;
 858   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 859 
 860   // Calculate where the new high for each of the regions should be.  If
 861   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 862   // then the unaligned lower and upper new highs would be the
 863   // lower_high() and upper_high() respectively.
 864   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 865   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 866   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 867 
 868   // Align the new highs based on each region's alignment.  lower and upper
 869   // alignment will always be default page size.  middle alignment will be
 870   // LargePageSizeInBytes if the actual size of the virtual space is in
 871   // fact larger than LargePageSizeInBytes.
 872   char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 873   char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 874   char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 875 
 876   // Determine which regions need to grow in this expand_by call.
 877   // If you are growing in the lower region, high() must be in that
 878   // region so calculate the size based on high().  For the middle and
 879   // upper regions, determine the starting point of growth based on the
 880   // location of high().  By getting the MAX of the region's low address
 881   // (or the previous region's high address) and high(), we can tell if it
 882   // is an intra or inter region growth.
 883   size_t lower_needs = 0;
 884   if (aligned_lower_new_high > lower_high()) {
 885     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 886   }
 887   size_t middle_needs = 0;
 888   if (aligned_middle_new_high > middle_high()) {
 889     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 890   }
 891   size_t upper_needs = 0;
 892   if (aligned_upper_new_high > upper_high()) {
 893     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 894   }
 895 
 896   // Check contiguity.
 897   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 898          "high address must be contained within the region");
 899   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 900          "high address must be contained within the region");
 901   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 902          "high address must be contained within the region");
 903 
 904   // Commit regions
 905   if (lower_needs > 0) {
 906     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 907     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 908       return false;
 909     }
 910     _lower_high += lower_needs;
 911   }
 912 
 913   if (middle_needs > 0) {
 914     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 915     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 916       return false;
 917     }
 918     _middle_high += middle_needs;
 919   }
 920 
 921   if (upper_needs > 0) {
 922     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 923     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 924       return false;
 925     }
 926     _upper_high += upper_needs;
 927   }
 928 
 929   _high += bytes;
 930   return true;
 931 }
 932 
 933 // A page is uncommitted if the contents of the entire page are deemed unusable.
 934 // Continue to decrement the high() pointer until it reaches a page boundary
 935 // in which case that particular page can now be uncommitted.
 936 void VirtualSpace::shrink_by(size_t size) {
 937   if (committed_size() < size)
 938     fatal("Cannot shrink virtual space to negative size");
 939 
 940   if (special()) {
 941     // don't uncommit if the entire space is pinned in memory
 942     _high -= size;
 943     return;
 944   }
 945 
 946   char* unaligned_new_high = high() - size;
 947   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 948 
 949   // Calculate new unaligned address
 950   char* unaligned_upper_new_high =
 951     MAX2(unaligned_new_high, middle_high_boundary());
 952   char* unaligned_middle_new_high =
 953     MAX2(unaligned_new_high, lower_high_boundary());
 954   char* unaligned_lower_new_high =
 955     MAX2(unaligned_new_high, low_boundary());
 956 
 957   // Align address to region's alignment
 958   char* aligned_upper_new_high =
 959     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 960   char* aligned_middle_new_high =
 961     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 962   char* aligned_lower_new_high =
 963     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 964 
 965   // Determine which regions need to shrink
 966   size_t upper_needs = 0;
 967   if (aligned_upper_new_high < upper_high()) {
 968     upper_needs =
 969       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 970   }
 971   size_t middle_needs = 0;
 972   if (aligned_middle_new_high < middle_high()) {
 973     middle_needs =
 974       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 975   }
 976   size_t lower_needs = 0;
 977   if (aligned_lower_new_high < lower_high()) {
 978     lower_needs =
 979       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 980   }
 981 
 982   // Check contiguity.
 983   assert(middle_high_boundary() <= upper_high() &&
 984          upper_high() <= upper_high_boundary(),
 985          "high address must be contained within the region");
 986   assert(lower_high_boundary() <= middle_high() &&
 987          middle_high() <= middle_high_boundary(),
 988          "high address must be contained within the region");
 989   assert(low_boundary() <= lower_high() &&
 990          lower_high() <= lower_high_boundary(),
 991          "high address must be contained within the region");
 992 
 993   // Uncommit
 994   if (upper_needs > 0) {
 995     assert(middle_high_boundary() <= aligned_upper_new_high &&
 996            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 997            "must not shrink beyond region");
 998     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 999       debug_only(warning("os::uncommit_memory failed"));
1000       return;
1001     } else {
1002       _upper_high -= upper_needs;
1003     }
1004   }
1005   if (middle_needs > 0) {
1006     assert(lower_high_boundary() <= aligned_middle_new_high &&
1007            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
1008            "must not shrink beyond region");
1009     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
1010       debug_only(warning("os::uncommit_memory failed"));
1011       return;
1012     } else {
1013       _middle_high -= middle_needs;
1014     }
1015   }
1016   if (lower_needs > 0) {
1017     assert(low_boundary() <= aligned_lower_new_high &&
1018            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
1019            "must not shrink beyond region");
1020     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
1021       debug_only(warning("os::uncommit_memory failed"));
1022       return;
1023     } else {
1024       _lower_high -= lower_needs;
1025     }
1026   }
1027 
1028   _high -= size;
1029 }
1030 
1031 #ifndef PRODUCT
1032 void VirtualSpace::check_for_contiguity() {
1033   // Check contiguity.
1034   assert(low_boundary() <= lower_high() &&
1035          lower_high() <= lower_high_boundary(),
1036          "high address must be contained within the region");
1037   assert(lower_high_boundary() <= middle_high() &&
1038          middle_high() <= middle_high_boundary(),
1039          "high address must be contained within the region");
1040   assert(middle_high_boundary() <= upper_high() &&
1041          upper_high() <= upper_high_boundary(),
1042          "high address must be contained within the region");
1043   assert(low() >= low_boundary(), "low");
1044   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1045   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1046   assert(high() <= upper_high(), "upper high");
1047 }
1048 
1049 void VirtualSpace::print_on(outputStream* out) {
1050   out->print   ("Virtual space:");
1051   if (special()) out->print(" (pinned in memory)");
1052   out->cr();
1053   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1054   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1055   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1056   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1057 }
1058 
1059 void VirtualSpace::print() {
1060   print_on(tty);
1061 }
1062 
1063 /////////////// Unit tests ///////////////
1064 
1065 #ifndef PRODUCT
1066 
1067 #define test_log(...) \
1068   do {\
1069     if (VerboseInternalVMTests) { \
1070       tty->print_cr(__VA_ARGS__); \
1071       tty->flush(); \
1072     }\
1073   } while (false)
1074 
1075 class TestReservedSpace : AllStatic {
1076  public:
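       // Touch one byte per small page so that, for special (pre-committed)
       // spaces, the test verifies the committed memory is actually usable.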
1077   static void small_page_write(void* addr, size_t size) {
1078     size_t page_size = os::vm_page_size();
1079 
1080     char* end = (char*)addr + size;
1081     for (char* p = (char*)addr; p < end; p += page_size) {
1082       *p = 1;
1083     }
1084   }
1085 
1086   static void release_memory_for_test(ReservedSpace rs) {
1087     if (rs.special()) {
1088       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1089     } else {
1090       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1091     }
1092   }
1093 
1094   static void test_reserved_space1(size_t size, size_t alignment) {
1095     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1096 
1097     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1098 
1099     ReservedSpace rs(size,          // size
1100                      alignment,     // alignment
1101                      UseLargePages, // large
1102                      (char *)NULL); // requested_address
1103 
1104     test_log(" rs.special() == %d", rs.special());
1105 
1106     assert(rs.base() != NULL, "Must be");
1107     assert(rs.size() == size, "Must be");
1108 
1109     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1110     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1111 
1112     if (rs.special()) {
1113       small_page_write(rs.base(), size);
1114     }
1115 
1116     release_memory_for_test(rs);
1117   }
1118 
1119   static void test_reserved_space2(size_t size) {
1120     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1121 
1122     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1123 
1124     ReservedSpace rs(size);
1125 
1126     test_log(" rs.special() == %d", rs.special());
1127 
1128     assert(rs.base() != NULL, "Must be");
1129     assert(rs.size() == size, "Must be");
1130 
1131     if (rs.special()) {
1132       small_page_write(rs.base(), size);
1133     }
1134 
1135     release_memory_for_test(rs);
1136   }
1137 
1138   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1139     test_log("test_reserved_space3(%p, %p, %d)",
1140         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1141 
1142     if (size < alignment) {
1143       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1144       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1145       return;
1146     }
1147 
1148     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1149     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1150 
1151     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1152 
1153     ReservedSpace rs(size, alignment, large, false);
1154 
1155     test_log(" rs.special() == %d", rs.special());
1156 
1157     assert(rs.base() != NULL, "Must be");
1158     assert(rs.size() == size, "Must be");
1159 
1160     if (rs.special()) {
1161       small_page_write(rs.base(), size);
1162     }
1163 
1164     release_memory_for_test(rs);
1165   }
1166 
1167 
1168   static void test_reserved_space1() {
1169     size_t size = 2 * 1024 * 1024;
1170     size_t ag   = os::vm_allocation_granularity();
1171 
1172     test_reserved_space1(size,      ag);
1173     test_reserved_space1(size * 2,  ag);
1174     test_reserved_space1(size * 10, ag);
1175   }
1176 
1177   static void test_reserved_space2() {
1178     size_t size = 2 * 1024 * 1024;
1179     size_t ag = os::vm_allocation_granularity();
1180 
1181     test_reserved_space2(size * 1);
1182     test_reserved_space2(size * 2);
1183     test_reserved_space2(size * 10);
1184     test_reserved_space2(ag);
1185     test_reserved_space2(size - ag);
1186     test_reserved_space2(size);
1187     test_reserved_space2(size + ag);
1188     test_reserved_space2(size * 2);
1189     test_reserved_space2(size * 2 - ag);
1190     test_reserved_space2(size * 2 + ag);
1191     test_reserved_space2(size * 3);
1192     test_reserved_space2(size * 3 - ag);
1193     test_reserved_space2(size * 3 + ag);
1194     test_reserved_space2(size * 10);
1195     test_reserved_space2(size * 10 + size / 2);
1196   }
1197 
1198   static void test_reserved_space3() {
1199     size_t ag = os::vm_allocation_granularity();
1200 
1201     test_reserved_space3(ag,      ag    , false);
1202     test_reserved_space3(ag * 2,  ag    , false);
1203     test_reserved_space3(ag * 3,  ag    , false);
1204     test_reserved_space3(ag * 2,  ag * 2, false);
1205     test_reserved_space3(ag * 4,  ag * 2, false);
1206     test_reserved_space3(ag * 8,  ag * 2, false);
1207     test_reserved_space3(ag * 4,  ag * 4, false);
1208     test_reserved_space3(ag * 8,  ag * 4, false);
1209     test_reserved_space3(ag * 16, ag * 4, false);
1210 
1211     if (UseLargePages) {
1212       size_t lp = os::large_page_size();
1213 
1214       // Without large pages
1215       test_reserved_space3(lp,     ag * 4, false);
1216       test_reserved_space3(lp * 2, ag * 4, false);
1217       test_reserved_space3(lp * 4, ag * 4, false);
1218       test_reserved_space3(lp,     lp    , false);
1219       test_reserved_space3(lp * 2, lp    , false);
1220       test_reserved_space3(lp * 3, lp    , false);
1221       test_reserved_space3(lp * 2, lp * 2, false);
1222       test_reserved_space3(lp * 4, lp * 2, false);
1223       test_reserved_space3(lp * 8, lp * 2, false);
1224 
1225       // With large pages
1226       test_reserved_space3(lp, ag * 4    , true);
1227       test_reserved_space3(lp * 2, ag * 4, true);
1228       test_reserved_space3(lp * 4, ag * 4, true);
1229       test_reserved_space3(lp, lp        , true);
1230       test_reserved_space3(lp * 2, lp    , true);
1231       test_reserved_space3(lp * 3, lp    , true);
1232       test_reserved_space3(lp * 2, lp * 2, true);
1233       test_reserved_space3(lp * 4, lp * 2, true);
1234       test_reserved_space3(lp * 8, lp * 2, true);
1235     }
1236   }
1237 
1238   static void test_reserved_space() {
1239     test_reserved_space1();
1240     test_reserved_space2();
1241     test_reserved_space3();
1242   }
1243 };
1244 
1245 void TestReservedSpace_test() {
1246   TestReservedSpace::test_reserved_space();
1247 }
1248 
1249 #define assert_equals(actual, expected)  \
1250   assert(actual == expected,             \
1251          "Got " SIZE_FORMAT " expected " \
1252          SIZE_FORMAT, actual, expected);
1253 
1254 #define assert_ge(value1, value2)                  \
1255   assert(value1 >= value2,                         \
1256          "'" #value1 "': " SIZE_FORMAT " '"        \
1257          #value2 "': " SIZE_FORMAT, value1, value2);
1258 
1259 #define assert_lt(value1, value2)                  \
1260   assert(value1 < value2,                          \
1261          "'" #value1 "': " SIZE_FORMAT " '"        \
1262          #value2 "': " SIZE_FORMAT, value1, value2);
1263 
1264 
1265 class TestVirtualSpace : AllStatic {
1266   enum TestLargePages {
1267     Default,
1268     Disable,
1269     Reserve,
1270     Commit
1271   };
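       // Default and Reserve take the default path in the helpers below (the
       // reservation may use large pages and the commit granularity follows the
       // region size). Disable forces small pages for both the reservation and
       // the commit granularity. Commit reserves without large pages but commits
       // with the largest granularity that fits the region.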
1272 
1273   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1274     switch(mode) {
1275     default:
1276     case Default:
1277     case Reserve:
1278       return ReservedSpace(reserve_size_aligned);
1279     case Disable:
1280     case Commit:
1281       return ReservedSpace(reserve_size_aligned,
1282                            os::vm_allocation_granularity(),
1283                            /* large */ false, /* exec */ false);
1284     }
1285   }
1286 
1287   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1288     switch(mode) {
1289     default:
1290     case Default:
1291     case Reserve:
1292       return vs.initialize(rs, 0);
1293     case Disable:
1294       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1295     case Commit:
1296       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1297     }
1298   }
1299 
1300  public:
1301   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1302                                                         TestLargePages mode = Default) {
1303     size_t granularity = os::vm_allocation_granularity();
1304     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1305 
1306     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1307 
1308     assert(reserved.is_reserved(), "Must be");
1309 
1310     VirtualSpace vs;
1311     bool initialized = initialize_virtual_space(vs, reserved, mode);
1312     assert(initialized, "Failed to initialize VirtualSpace");
1313 
1314     vs.expand_by(commit_size, false);
1315 
1316     if (vs.special()) {
1317       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1318     } else {
1319       assert_ge(vs.actual_committed_size(), commit_size);
1320       // Approximate the commit granularity.
1321       // Make sure that we don't commit using large pages
1322       // if large pages have been disabled for this VirtualSpace.
1323       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1324                                    os::vm_page_size() : os::large_page_size();
1325       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1326     }
1327 
1328     reserved.release();
1329   }
1330 
1331   static void test_virtual_space_actual_committed_space_one_large_page() {
1332     if (!UseLargePages) {
1333       return;
1334     }
1335 
1336     size_t large_page_size = os::large_page_size();
1337 
1338     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1339 
1340     assert(reserved.is_reserved(), "Must be");
1341 
1342     VirtualSpace vs;
1343     bool initialized = vs.initialize(reserved, 0);
1344     assert(initialized, "Failed to initialize VirtualSpace");
1345 
1346     vs.expand_by(large_page_size, false);
1347 
1348     assert_equals(vs.actual_committed_size(), large_page_size);
1349 
1350     reserved.release();
1351   }
1352 
1353   static void test_virtual_space_actual_committed_space() {
1354     test_virtual_space_actual_committed_space(4 * K, 0);
1355     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1356     test_virtual_space_actual_committed_space(8 * K, 0);
1357     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1358     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1359     test_virtual_space_actual_committed_space(12 * K, 0);
1360     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1361     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1362     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1363     test_virtual_space_actual_committed_space(64 * K, 0);
1364     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1365     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1366     test_virtual_space_actual_committed_space(2 * M, 0);
1367     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1368     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1369     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1370     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1371     test_virtual_space_actual_committed_space(10 * M, 0);
1372     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1373     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1374     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1375     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1376     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1377     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1378   }
1379 
1380   static void test_virtual_space_disable_large_pages() {
1381     if (!UseLargePages) {
1382       return;
1383     }
1384     // These test cases verify the commit granularity when we force VirtualSpace to disable large pages, and compare with the Reserve and Commit modes.
1385     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1386     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1387     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1388     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1389     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1390     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1391     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1392 
1393     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1394     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1395     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1396     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1397     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1398     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1399     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1400 
1401     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1402     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1403     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1404     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1405     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1406     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1407     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1408   }
1409 
1410   static void test_virtual_space() {
1411     test_virtual_space_actual_committed_space();
1412     test_virtual_space_actual_committed_space_one_large_page();
1413     test_virtual_space_disable_large_pages();
1414   }
1415 };
1416 
1417 void TestVirtualSpace_test() {
1418   TestVirtualSpace::test_virtual_space();
1419 }
1420 
1421 #endif // PRODUCT
1422 
1423 #endif