1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "logging/log.hpp"
  28 #include "memory/resourceArea.hpp"
  29 #include "memory/virtualspace.hpp"
  30 #include "oops/markOop.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "services/memTracker.hpp"
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
  71 
  72 // Helper: detect failure to reserve at requested_address and release any misplaced reservation.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
  80     // A different reserve address may be acceptable in other cases,
  81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     log_debug(gc, heap, coops)("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, p2i(base), p2i(requested_address));
  84     // OS ignored requested address. Try different address.
  85     if (special) {
  86       if (!os::release_memory_special(base, size)) {
  87         fatal("os::release_memory_special failed");
  88       }
  89     } else {
  90       if (!os::release_memory(base, size)) {
  91         fatal("os::release_memory failed");
  92       }
  93     }
  94   }
  95   return true;
  96 }
  97 
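     // Reserve a virtual address range of 'size' bytes aligned to 'alignment'.
     // If 'large' is requested and the OS cannot commit large pages on demand, the
     // whole region is reserved and pinned up front via reserve_memory_special().
     // If 'requested_address' is non-NULL the reservation is attempted there. On
     // failure _base is left NULL, so callers can detect it via is_reserved().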
  98 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
  99                                char* requested_address,
 100                                bool executable) {
 101   const size_t granularity = os::vm_allocation_granularity();
 102   assert((size & (granularity - 1)) == 0,
 103          "size not aligned to os::vm_allocation_granularity()");
 104   assert((alignment & (granularity - 1)) == 0,
 105          "alignment not aligned to os::vm_allocation_granularity()");
 106   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 107          "not a power of 2");
 108 
 109   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 110 
 111   _base = NULL;
 112   _size = 0;
 113   _special = false;
 114   _executable = executable;
 115   _alignment = 0;
 116   _noaccess_prefix = 0;
 117   if (size == 0) {
 118     return;
 119   }
 120 
 121   // If the OS doesn't support demand paging for large page memory, we need
 122   // to use reserve_memory_special() to reserve and pin the entire region.
 123   // If there is a backing file directory for this VirtualSpace, large page use is
 124   // up to the filesystem the directory resides in, so the UseLargePages flag is ignored.
 125   bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
 126   char* base = NULL;
 127 
 128   if (special) {
 129 
 130     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 131 
 132     if (base != NULL) {
 133       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 134         // OS ignored requested address. Try different address.
 135         return;
 136       }
 137       // Check alignment constraints.
 138       assert((uintptr_t) base % alignment == 0,
 139              "Large pages returned a non-aligned address, base: "
 140              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 141              p2i(base), alignment);
 142       _special = true;
 143     } else {
 144       // Failed; try to reserve regular memory below
 145       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 146                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 147         log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 148       }
 149     }
 150   }
 151 
 152   if (base == NULL) {
 153     // Optimistically assume that the OS returns an aligned base pointer.
 154     // When reserving a large address range, most OSes seem to align to at
 155     // least 64K.
 156 
 157     // If the memory was requested at a particular address, use
 158     // os::attempt_reserve_memory_at() to avoid mapping over something
 159     // important.  If available space is not detected, return NULL.
 160 
 161     if (requested_address != 0) {
 162       base = os::attempt_reserve_memory_at(size, requested_address);
 163       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 164         // OS ignored requested address. Try different address.
 165         base = NULL;
 166       }
 167     } else {
 168       base = os::reserve_memory(size, NULL, alignment);
 169     }
 170 
 171     if (base == NULL) return;
 172 
 173     // Check alignment constraints
 174     if ((((size_t)base) & (alignment - 1)) != 0) {
 175       // Base not aligned, retry
 176       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 177       // Make sure that size is aligned
 178       size = align_size_up(size, alignment);
 179       base = os::reserve_memory_aligned(size, alignment);
 180 
 181       if (requested_address != 0 &&
 182           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 183         // As a result of the alignment constraints, the allocated base differs
 184         // from the requested address. Return back to the caller who can
 185         // take remedial action (like try again without a requested address).
 186         assert(_base == NULL, "should be");
 187         return;
 188       }
 189     }
 190   }
 191   // Done
 192   _base = base;
 193   _size = size;
 194   _alignment = alignment;
 195 
 196   if (_backingFileDir != NULL) {
 197     // At this point a virtual address range is reserved; now map this memory to a file.
 198     os::map_memory_to_file(base, size, _backingFileDir);
 199     // mark this virtual space as _special because the physical memory is committed.
 200     _special = true;
 201   }
 202 }
 203 
 204 
 205 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 206                              bool special, bool executable) {
 207   assert((size % os::vm_allocation_granularity()) == 0,
 208          "size not allocation aligned");
 209   _base = base;
 210   _size = size;
 211   _alignment = alignment;
 212   _noaccess_prefix = 0;
 213   _special = special;
 214   _executable = executable;
 215 }
 216 
 217 
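     // Return a ReservedSpace describing the first 'partition_size' bytes of this
     // space. If 'split' is set, the underlying OS reservation is split as well
     // (see os::split_reserved_memory); the remainder is obtained via last_part().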
 218 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 219                                         bool split, bool realloc) {
 220   assert(partition_size <= size(), "partition failed");
 221   if (split) {
 222     os::split_reserved_memory(base(), size(), partition_size, realloc);
 223   }
 224   ReservedSpace result(base(), partition_size, alignment, special(),
 225                        executable());
 226   return result;
 227 }
 228 
 229 
 230 ReservedSpace
 231 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 232   assert(partition_size <= size(), "partition failed");
 233   ReservedSpace result(base() + partition_size, size() - partition_size,
 234                        alignment, special(), executable());
 235   return result;
 236 }
 237 
 238 
 239 size_t ReservedSpace::page_align_size_up(size_t size) {
 240   return align_size_up(size, os::vm_page_size());
 241 }
 242 
 243 
 244 size_t ReservedSpace::page_align_size_down(size_t size) {
 245   return align_size_down(size, os::vm_page_size());
 246 }
 247 
 248 
 249 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 250   return align_size_up(size, os::vm_allocation_granularity());
 251 }
 252 
 253 
 254 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 255   return align_size_down(size, os::vm_allocation_granularity());
 256 }
 257 
 258 
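     // Release the underlying OS reservation, including any noaccess prefix carved
     // off the front, and reset all fields so that is_reserved() returns false.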
 259 void ReservedSpace::release() {
 260   if (is_reserved()) {
 261     char *real_base = _base - _noaccess_prefix;
 262     const size_t real_size = _size + _noaccess_prefix;
 263     if (special()) {
 264       os::release_memory_special(real_base, real_size);
 265     } else {
 266       os::release_memory(real_base, real_size);
 267     }
 268     _base = NULL;
 269     _size = 0;
 270     _noaccess_prefix = 0;
 271     _alignment = 0;
 272     _special = false;
 273     _executable = false;
 274   }
 275 }
 276 
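     // The noaccess prefix is protected page-wise and must also preserve the heap
     // alignment, hence the least common multiple of the two.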
 277 static size_t noaccess_prefix_size(size_t alignment) {
 278   return lcm(os::vm_page_size(), alignment);
 279 }
 280 
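     // Carve a protection page off the front of the reserved space and protect it,
     // so that decoding a compressed NULL oop hits an inaccessible page and the VM
     // can rely on implicit null checks instead of explicit ones.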
 281 void ReservedHeapSpace::establish_noaccess_prefix() {
 282   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 283   _noaccess_prefix = noaccess_prefix_size(_alignment);
 284 
 285   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 286     if (true
 287         WIN64_ONLY(&& !UseLargePages)
 288         AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
 289       // Protect memory at the base of the allocated region.
 290       // If special, the page was committed (only matters on Windows).
 291       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 292         fatal("cannot protect protection page");
 293       }
 294       log_debug(gc, heap, coops)("Protected page at the reserved heap base: "
 295                                  PTR_FORMAT " / " INTX_FORMAT " bytes",
 296                                  p2i(_base),
 297                                  _noaccess_prefix);
 298       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 299     } else {
 300       Universe::set_narrow_oop_use_implicit_null_checks(false);
 301     }
 302   }
 303 
 304   _base += _noaccess_prefix;
 305   _size -= _noaccess_prefix;
 306   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 307 }
 308 
 309 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 310 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 311 // might still fulfill the wishes of the caller.
 312 // Ensures the memory is aligned to 'alignment'.
 313 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 314 void ReservedHeapSpace::try_reserve_heap(size_t size,
 315                                          size_t alignment,
 316                                          bool large,
 317                                          char* requested_address) {
 318   if (_base != NULL) {
 319     // We tried before, but we didn't like the address delivered.
 320     release();
 321   }
 322 
 323   // If the OS doesn't support demand paging for large page memory, we need
 324   // to use reserve_memory_special() to reserve and pin the entire region.
 325   // If there is a backing file directory for this VirtualSpace, large page use is
 326   // up to the filesystem the directory resides in, so the UseLargePages flag is ignored.
 327   bool special = (_backingFileDir == NULL) && (large && !os::can_commit_large_page_memory());
 328   char* base = NULL;
 329 
 330   log_trace(gc, heap, coops)("Trying to allocate at address " PTR_FORMAT
 331                              " heap of size " SIZE_FORMAT_HEX,
 332                              p2i(requested_address),
 333                              size);
 334 
 335   if (special) {
 336     base = os::reserve_memory_special(size, alignment, requested_address, false);
 337 
 338     if (base != NULL) {
 339       // Check alignment constraints.
 340       assert((uintptr_t) base % alignment == 0,
 341              "Large pages returned a non-aligned address, base: "
 342              PTR_FORMAT " alignment: " SIZE_FORMAT_HEX,
 343              p2i(base), alignment);
 344       _special = true;
 345     }
 346   }
 347 
 348   if (base == NULL) {
 349     // Failed; try to reserve regular memory below
 350     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 351                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 352       log_debug(gc, heap, coops)("Reserve regular memory without large pages");
 353     }
 354 
 355     // Optimistically assume that the OS returns an aligned base pointer.
 356     // When reserving a large address range, most OSes seem to align to at
 357     // least 64K.
 358 
 359     // If the memory was requested at a particular address, use
 360     // os::attempt_reserve_memory_at() to avoid mapping over something
 361     // important.  If available space is not detected, return NULL.
 362 
 363     if (requested_address != 0) {
 364       base = os::attempt_reserve_memory_at(size, requested_address);
 365     } else {
 366       base = os::reserve_memory(size, NULL, alignment);
 367     }
 368   }
 369   if (base == NULL) { return; }
 370 
 371   // Done
 372   _base = base;
 373   _size = size;
 374   _alignment = alignment;
 375 
 376   // Check alignment constraints
 377   if ((((size_t)base) & (alignment - 1)) != 0) {
 378     // Base not aligned, retry.
 379     release();
 380     return;
 381   }
 382   if (_backingFileDir != NULL) {
 383     // At this point a virtual address range is reserved; now map this memory to a file.
 384     if (!os::map_memory_to_file(base, size, _backingFileDir)) {
 385       vm_exit_during_initialization(err_msg("Error in mapping object heap at the given filesystem dir %s", _backingFileDir));
 386     }
 387     // mark this virtual space as _special because the physical memory is committed.
 388     _special = true;
 389   }
 390 }
 391 
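     // Walk candidate attach points from 'highest_start' down to 'lowest_start',
     // calling try_reserve_heap() until a reservation satisfies
     // aligned_heap_base_min_address <= _base && _base + size <= upper_bound,
     // or the attach points are exhausted. HeapSearchSteps caps the number of tries.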
 392 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 393                                           char *lowest_start,
 394                                           size_t attach_point_alignment,
 395                                           char *aligned_heap_base_min_address,
 396                                           char *upper_bound,
 397                                           size_t size,
 398                                           size_t alignment,
 399                                           bool large) {
 400   const size_t attach_range = highest_start - lowest_start;
 401   // Cap num_attempts at the number of possible attach points.
 402   // At least one attempt is possible even for a zero-sized attach range.
 403   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 404   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 405 
 406   const size_t stepsize = (attach_range == 0) ? // Only one try.
 407     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 408 
 409   // Try attach points from top to bottom.
 410   char* attach_point = highest_start;
 411   while (attach_point >= lowest_start  &&
 412          attach_point <= highest_start &&  // Avoid wrap around.
 413          ((_base == NULL) ||
 414           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 415     try_reserve_heap(size, alignment, large, attach_point);
 416     attach_point -= stepsize;
 417   }
 418 }
 419 
 420 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 421 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 422 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 423 
 424 // Helper for heap allocation. Returns an array with addresses
 425 // (OS-specific) which are suited for disjoint base mode. Array is
 426 // NULL terminated.
 427 static char** get_attach_addresses_for_disjoint_mode() {
 428   static uint64_t addresses[] = {
 429      2 * SIZE_32G,
 430      3 * SIZE_32G,
 431      4 * SIZE_32G,
 432      8 * SIZE_32G,
 433     10 * SIZE_32G,
 434      1 * SIZE_64K * SIZE_32G,
 435      2 * SIZE_64K * SIZE_32G,
 436      3 * SIZE_64K * SIZE_32G,
 437      4 * SIZE_64K * SIZE_32G,
 438     16 * SIZE_64K * SIZE_32G,
 439     32 * SIZE_64K * SIZE_32G,
 440     34 * SIZE_64K * SIZE_32G,
 441     0
 442   };
 443 
 444   // Skip addresses smaller than OopEncodingHeapMax or HeapBaseMinAddress.
 445   // This assumes the array is sorted.
 446   uint i = 0;
 447   while (addresses[i] != 0 &&
 448          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 449     i++;
 450   }
 451   uint start = i;
 452 
 453   // Avoid more steps than requested.
 454   i = 0;
 455   while (addresses[start+i] != 0) {
 456     if (i == HeapSearchSteps) {
 457       addresses[start+i] = 0;
 458       break;
 459     }
 460     i++;
 461   }
 462 
 463   return (char**) &addresses[start];
 464 }
 465 
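     // Reserve a heap suitable for compressed oops. Strategies are tried in order of
     // decreasing benefit: a user-supplied HeapBaseMinAddress, then addresses allowing
     // unscaled (32-bit) oops, then zerobased (shift-only) decoding, then disjoint-base
     // attach points, and finally any address with a noaccess prefix.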
 466 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 467   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 468             "can not allocate compressed oop heap for this size");
 469   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 470   assert(HeapBaseMinAddress > 0, "sanity");
 471 
 472   const size_t granularity = os::vm_allocation_granularity();
 473   assert((size & (granularity - 1)) == 0,
 474          "size not aligned to os::vm_allocation_granularity()");
 475   assert((alignment & (granularity - 1)) == 0,
 476          "alignment not aligned to os::vm_allocation_granularity()");
 477   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 478          "not a power of 2");
 479 
 480   // The necessary attach point alignment for generated wish addresses.
 481   // This is needed to increase the chance of attaching for mmap and shmat.
 482   const size_t os_attach_point_alignment =
 483     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 484     NOT_AIX(os::vm_allocation_granularity());
 485   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 486 
 487   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 488   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 489     noaccess_prefix_size(alignment) : 0;
 490 
 491   // Attempt to alloc at user-given address.
 492   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 493     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 494     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 495       release();
 496     }
 497   }
 498 
 499   // Keep heap at HeapBaseMinAddress.
 500   if (_base == NULL) {
 501 
 502     // Try to allocate the heap at addresses that allow efficient oop compression.
 503     // Different schemes are tried, in order of decreasing optimization potential.
 504     //
 505     // For this, try_reserve_heap() is called with the desired heap base addresses.
 506     // A call into the os layer to allocate at a given address can return memory
 507     // at a different address than requested.  Still, this might be memory at a useful
 508     // address. try_reserve_heap() always keeps this allocated memory, since the
 509     // criteria for a good heap are only checked here.
 510 
 511     // Attempt to allocate so that we can run without base and scale (32-bit unscaled compressed oops).
 512     // Give it several tries from top of range to bottom.
 513     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 514 
 515       // Calc the address range within which we try to attach (range of possible start addresses).
 516       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 517       char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
 518       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 519                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 520     }
 521 
 522     // zerobased: Attempt to allocate in the lower 32G.
 523     // But leave room for the compressed class space, which is allocated above
 524     // the heap.
 525     char *zerobased_max = (char *)OopEncodingHeapMax;
 526     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 527     // For small heaps, save some space for compressed class pointer
 528     // space so it can be decoded with no base.
 529     if (UseCompressedClassPointers && !UseSharedSpaces &&
 530         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 531         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 532       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 533     }
 534 
 535     // Give it several tries from top of range to bottom.
 536     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
 537         ((_base == NULL) ||                        // No previous try succeeded.
 538          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 539 
 540       // Calc the address range within which we try to attach (range of possible start addresses).
 541       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 542       // Need to be careful about size being guaranteed to be less
 543       // than UnscaledOopHeapMax due to type constraints.
 544       char *lowest_start = aligned_heap_base_min_address;
 545       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 546       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 547         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 548       }
 549       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 550       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 551                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 552     }
 553 
 554     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 555     // implement null checks.
 556     noaccess_prefix = noaccess_prefix_size(alignment);
 557 
 558     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjoint base mode.
 559     char** addresses = get_attach_addresses_for_disjoint_mode();
 560     int i = 0;
 561     while (addresses[i] &&                                 // End of array not yet reached.
 562            ((_base == NULL) ||                             // No previous try succeeded.
 563             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 564              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 565       char* const attach_point = addresses[i];
 566       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 567       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 568       i++;
 569     }
 570 
 571     // Last, desperate try without any placement.
 572     if (_base == NULL) {
 573       log_trace(gc, heap, coops)("Trying to allocate at address NULL heap of size " SIZE_FORMAT_HEX, size + noaccess_prefix);
 574       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 575     }
 576   }
 577 }
 578 
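     // Reserve space for the Java heap. With compressed oops the placement is chosen
     // to make oop decoding as cheap as possible; if the reservation includes a
     // noaccess prefix (_size > size), it is carved off via establish_noaccess_prefix().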
 579 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large, const char* backingFSforHeap) : ReservedSpace() {
 580 
 581   if (size == 0) {
 582     return;
 583   }
 584 
 585   _backingFileDir = backingFSforHeap;
 586   // Heap size should be aligned to alignment, too.
 587   guarantee(is_size_aligned(size, alignment), "set by caller");
 588 
 589   if (UseCompressedOops) {
 590     initialize_compressed_heap(size, alignment, large);
 591     if (_size > size) {
 592       // We allocated the heap with a noaccess prefix.
 593       // It can happen that we get a zerobased/unscaled heap with a noaccess prefix,
 594       // if we had to try at an arbitrary address.
 595       establish_noaccess_prefix();
 596     }
 597   } else {
 598     initialize(size, alignment, large, NULL, false);
 599   }
 600 
 601   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 602          "area must be distinguishable from marks for mark-sweep");
 603   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 604          "area must be distinguishable from marks for mark-sweep");
 605 
 606   if (base() != NULL) {
 607     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 608   }
 609 }
 610 
 611 // Reserve space for the code segment.  Same as the Java heap, except that it is
 612 // marked executable when dynamic code generation is supported.
 613 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 614                                      size_t rs_align,
 615                                      bool large) :
 616   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 617   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 618 }
 619 
 620 // VirtualSpace
 621 
 622 VirtualSpace::VirtualSpace() {
 623   _low_boundary           = NULL;
 624   _high_boundary          = NULL;
 625   _low                    = NULL;
 626   _high                   = NULL;
 627   _lower_high             = NULL;
 628   _middle_high            = NULL;
 629   _upper_high             = NULL;
 630   _lower_high_boundary    = NULL;
 631   _middle_high_boundary   = NULL;
 632   _upper_high_boundary    = NULL;
 633   _lower_alignment        = 0;
 634   _middle_alignment       = 0;
 635   _upper_alignment        = 0;
 636   _special                = false;
 637   _executable             = false;
 638 }
 639 
 640 
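     // Initialize the VirtualSpace over the given reservation and commit
     // 'committed_size' bytes up front. The commit granularity is the largest page
     // size usable for a region of rs.size() bytes (large pages when they fit).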
 641 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 642   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 643   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 644 }
 645 
 646 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 647   if (!rs.is_reserved()) return false;  // allocation failed.
 648   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 649   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 650 
 651   _low_boundary  = rs.base();
 652   _high_boundary = low_boundary() + rs.size();
 653 
 654   _low = low_boundary();
 655   _high = low();
 656 
 657   _special = rs.special();
 658   _executable = rs.executable();
 659 
 660   // When a VirtualSpace begins life at a large size, make all future expansion
 661   // and shrinking occur aligned to a granularity of large pages.  This avoids
 662   // fragmentation of physical addresses that inhibits the use of large pages
 663   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 664   // page size, the only spaces that get handled this way are codecache and
 665   // the heap itself, both of which provide a substantial performance
 666   // boost in many benchmarks when covered by large pages.
 667   //
 668   // No attempt is made to force large page alignment at the very top and
 669   // bottom of the space if they are not aligned so already.
 670   _lower_alignment  = os::vm_page_size();
 671   _middle_alignment = max_commit_granularity;
 672   _upper_alignment  = os::vm_page_size();
 673 
 674   // End of each region
 675   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 676   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 677   _upper_high_boundary = high_boundary();
 678 
 679   // High address of each region
 680   _lower_high = low_boundary();
 681   _middle_high = lower_high_boundary();
 682   _upper_high = middle_high_boundary();
 683 
 684   // commit to initial size
 685   if (committed_size > 0) {
 686     if (!expand_by(committed_size)) {
 687       return false;
 688     }
 689   }
 690   return true;
 691 }
 692 
 693 
 694 VirtualSpace::~VirtualSpace() {
 695   release();
 696 }
 697 
 698 
 699 void VirtualSpace::release() {
 700   // This does not release memory: a VirtualSpace never reserves any itself.
 701   // The caller must release the underlying ReservedSpace via rs.release().
 702   _low_boundary           = NULL;
 703   _high_boundary          = NULL;
 704   _low                    = NULL;
 705   _high                   = NULL;
 706   _lower_high             = NULL;
 707   _middle_high            = NULL;
 708   _upper_high             = NULL;
 709   _lower_high_boundary    = NULL;
 710   _middle_high_boundary   = NULL;
 711   _upper_high_boundary    = NULL;
 712   _lower_alignment        = 0;
 713   _middle_alignment       = 0;
 714   _upper_alignment        = 0;
 715   _special                = false;
 716   _executable             = false;
 717 }
 718 
 719 
 720 size_t VirtualSpace::committed_size() const {
 721   return pointer_delta(high(), low(), sizeof(char));
 722 }
 723 
 724 
 725 size_t VirtualSpace::reserved_size() const {
 726   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 727 }
 728 
 729 
 730 size_t VirtualSpace::uncommitted_size()  const {
 731   return reserved_size() - committed_size();
 732 }
 733 
 734 size_t VirtualSpace::actual_committed_size() const {
 735   // Special VirtualSpaces commit all reserved space up front.
 736   if (special()) {
 737     return reserved_size();
 738   }
 739 
 740   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 741   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 742   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 743 
 744 #ifdef ASSERT
 745   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 746   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 747   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 748 
 749   if (committed_high > 0) {
 750     assert(committed_low == lower, "Must be");
 751     assert(committed_middle == middle, "Must be");
 752   }
 753 
 754   if (committed_middle > 0) {
 755     assert(committed_low == lower, "Must be");
 756   }
 757   if (committed_middle < middle) {
 758     assert(committed_high == 0, "Must be");
 759   }
 760 
 761   if (committed_low < lower) {
 762     assert(committed_high == 0, "Must be");
 763     assert(committed_middle == 0, "Must be");
 764   }
 765 #endif
 766 
 767   return committed_low + committed_middle + committed_high;
 768 }
 769 
 770 
 771 bool VirtualSpace::contains(const void* p) const {
 772   return low() <= (const char*) p && (const char*) p < high();
 773 }
 774 
 775 static void pretouch_expanded_memory(void* start, void* end) {
 776   assert(is_ptr_aligned(start, os::vm_page_size()), "Unexpected alignment");
 777   assert(is_ptr_aligned(end,   os::vm_page_size()), "Unexpected alignment");
 778 
 779   os::pretouch_memory(start, end);
 780 }
 781 
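     // Commit an expanded range with the given alignment hint, optionally
     // pre-touching it so the OS backs it with real pages; returns false
     // (with a debug warning) if the commit fails.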
 782 static bool commit_expanded(char* start, size_t size, size_t alignment, bool pre_touch, bool executable) {
 783   if (os::commit_memory(start, size, alignment, executable)) {
 784     if (pre_touch || AlwaysPreTouch) {
 785       pretouch_expanded_memory(start, start + size);
 786     }
 787     return true;
 788   }
 789 
 790   debug_only(warning(
 791       "INFO: os::commit_memory(" PTR_FORMAT ", " PTR_FORMAT
 792       " size=" SIZE_FORMAT ", executable=%d) failed",
 793       p2i(start), p2i(start + size), size, executable);)
 794 
 795   return false;
 796 }
 797 
 798 /*
 799    First we need to determine if a particular virtual space is using large
 800    pages.  This is done in the initialize function and only virtual spaces
 801    that are larger than LargePageSizeInBytes use large pages.  Once we
 802    have determined this, all expand_by and shrink_by calls must grow and
 803    shrink by large page size chunks.  If a particular request
 804    is within the current large page, the call to commit and uncommit memory
 805    can be ignored.  In the case that the low and high boundaries of this
 806    space are not large page aligned, the pages leading to the first large
 807    page address and the pages after the last large page address must be
 808    allocated with default pages.
 809 */
 810 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 811   if (uncommitted_size() < bytes) {
 812     return false;
 813   }
 814 
 815   if (special()) {
 816     // don't commit memory if the entire space is pinned in memory
 817     _high += bytes;
 818     return true;
 819   }
 820 
 821   char* previous_high = high();
 822   char* unaligned_new_high = high() + bytes;
 823   assert(unaligned_new_high <= high_boundary(), "cannot expand by more than upper boundary");
 824 
 825   // Calculate where the new high for each of the regions should be.  If
 826   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 827   // then the unaligned lower and upper new highs would be the
 828   // lower_high() and upper_high() respectively.
 829   char* unaligned_lower_new_high =  MIN2(unaligned_new_high, lower_high_boundary());
 830   char* unaligned_middle_new_high = MIN2(unaligned_new_high, middle_high_boundary());
 831   char* unaligned_upper_new_high =  MIN2(unaligned_new_high, upper_high_boundary());
 832 
 833   // Align the new highs based on each region's alignment.  lower and upper
 834   // alignment will always be default page size.  middle alignment will be
 835   // LargePageSizeInBytes if the actual size of the virtual space is in
 836   // fact larger than LargePageSizeInBytes.
 837   char* aligned_lower_new_high =  (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 838   char* aligned_middle_new_high = (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 839   char* aligned_upper_new_high =  (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 840 
 841   // Determine which regions need to grow in this expand_by call.
 842   // If you are growing in the lower region, high() must be in that
 843   // region so calculate the size based on high().  For the middle and
 844   // upper regions, determine the starting point of growth based on the
 845   // location of high().  By getting the MAX of the region's low address
 846   // (or the previous region's high address) and high(), we can tell if it
 847   // is an intra or inter region growth.
 848   size_t lower_needs = 0;
 849   if (aligned_lower_new_high > lower_high()) {
 850     lower_needs = pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 851   }
 852   size_t middle_needs = 0;
 853   if (aligned_middle_new_high > middle_high()) {
 854     middle_needs = pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 855   }
 856   size_t upper_needs = 0;
 857   if (aligned_upper_new_high > upper_high()) {
 858     upper_needs = pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 859   }
 860 
 861   // Check contiguity.
 862   assert(low_boundary() <= lower_high() && lower_high() <= lower_high_boundary(),
 863          "high address must be contained within the region");
 864   assert(lower_high_boundary() <= middle_high() && middle_high() <= middle_high_boundary(),
 865          "high address must be contained within the region");
 866   assert(middle_high_boundary() <= upper_high() && upper_high() <= upper_high_boundary(),
 867          "high address must be contained within the region");
 868 
 869   // Commit regions
 870   if (lower_needs > 0) {
 871     assert(lower_high() + lower_needs <= lower_high_boundary(), "must not expand beyond region");
 872     if (!commit_expanded(lower_high(), lower_needs, _lower_alignment, pre_touch, _executable)) {
 873       return false;
 874     }
 875     _lower_high += lower_needs;
 876   }
 877 
 878   if (middle_needs > 0) {
 879     assert(middle_high() + middle_needs <= middle_high_boundary(), "must not expand beyond region");
 880     if (!commit_expanded(middle_high(), middle_needs, _middle_alignment, pre_touch, _executable)) {
 881       return false;
 882     }
 883     _middle_high += middle_needs;
 884   }
 885 
 886   if (upper_needs > 0) {
 887     assert(upper_high() + upper_needs <= upper_high_boundary(), "must not expand beyond region");
 888     if (!commit_expanded(upper_high(), upper_needs, _upper_alignment, pre_touch, _executable)) {
 889       return false;
 890     }
 891     _upper_high += upper_needs;
 892   }
 893 
 894   _high += bytes;
 895   return true;
 896 }
 897 
 898 // A page is uncommitted if the contents of the entire page are deemed unusable.
 899 // Continue to decrement the high() pointer until it reaches a page boundary, at
 900 // which point that particular page can now be uncommitted.
 901 void VirtualSpace::shrink_by(size_t size) {
 902   if (committed_size() < size)
 903     fatal("Cannot shrink virtual space to negative size");
 904 
 905   if (special()) {
 906     // don't uncommit if the entire space is pinned in memory
 907     _high -= size;
 908     return;
 909   }
 910 
 911   char* unaligned_new_high = high() - size;
 912   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 913 
 914   // Calculate new unaligned address
 915   char* unaligned_upper_new_high =
 916     MAX2(unaligned_new_high, middle_high_boundary());
 917   char* unaligned_middle_new_high =
 918     MAX2(unaligned_new_high, lower_high_boundary());
 919   char* unaligned_lower_new_high =
 920     MAX2(unaligned_new_high, low_boundary());
 921 
 922   // Align address to region's alignment
 923   char* aligned_upper_new_high =
 924     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 925   char* aligned_middle_new_high =
 926     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 927   char* aligned_lower_new_high =
 928     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 929 
 930   // Determine which regions need to shrink
 931   size_t upper_needs = 0;
 932   if (aligned_upper_new_high < upper_high()) {
 933     upper_needs =
 934       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 935   }
 936   size_t middle_needs = 0;
 937   if (aligned_middle_new_high < middle_high()) {
 938     middle_needs =
 939       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 940   }
 941   size_t lower_needs = 0;
 942   if (aligned_lower_new_high < lower_high()) {
 943     lower_needs =
 944       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 945   }
 946 
 947   // Check contiguity.
 948   assert(middle_high_boundary() <= upper_high() &&
 949          upper_high() <= upper_high_boundary(),
 950          "high address must be contained within the region");
 951   assert(lower_high_boundary() <= middle_high() &&
 952          middle_high() <= middle_high_boundary(),
 953          "high address must be contained within the region");
 954   assert(low_boundary() <= lower_high() &&
 955          lower_high() <= lower_high_boundary(),
 956          "high address must be contained within the region");
 957 
 958   // Uncommit
 959   if (upper_needs > 0) {
 960     assert(middle_high_boundary() <= aligned_upper_new_high &&
 961            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 962            "must not shrink beyond region");
 963     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 964       debug_only(warning("os::uncommit_memory failed"));
 965       return;
 966     } else {
 967       _upper_high -= upper_needs;
 968     }
 969   }
 970   if (middle_needs > 0) {
 971     assert(lower_high_boundary() <= aligned_middle_new_high &&
 972            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 973            "must not shrink beyond region");
 974     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 975       debug_only(warning("os::uncommit_memory failed"));
 976       return;
 977     } else {
 978       _middle_high -= middle_needs;
 979     }
 980   }
 981   if (lower_needs > 0) {
 982     assert(low_boundary() <= aligned_lower_new_high &&
 983            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 984            "must not shrink beyond region");
 985     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 986       debug_only(warning("os::uncommit_memory failed"));
 987       return;
 988     } else {
 989       _lower_high -= lower_needs;
 990     }
 991   }
 992 
 993   _high -= size;
 994 }
 995 
 996 #ifndef PRODUCT
 997 void VirtualSpace::check_for_contiguity() {
 998   // Check contiguity.
 999   assert(low_boundary() <= lower_high() &&
1000          lower_high() <= lower_high_boundary(),
1001          "high address must be contained within the region");
1002   assert(lower_high_boundary() <= middle_high() &&
1003          middle_high() <= middle_high_boundary(),
1004          "high address must be contained within the region");
1005   assert(middle_high_boundary() <= upper_high() &&
1006          upper_high() <= upper_high_boundary(),
1007          "high address must be contained within the region");
1008   assert(low() >= low_boundary(), "low");
1009   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1010   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1011   assert(high() <= upper_high(), "upper high");
1012 }
1013 
1014 void VirtualSpace::print_on(outputStream* out) {
1015   out->print   ("Virtual space:");
1016   if (special()) out->print(" (pinned in memory)");
1017   out->cr();
1018   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1019   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1020   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low()), p2i(high()));
1021   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  p2i(low_boundary()), p2i(high_boundary()));
1022 }
1023 
1024 void VirtualSpace::print() {
1025   print_on(tty);
1026 }
1027 
1028 /////////////// Unit tests ///////////////
1029 
1030 #ifndef PRODUCT
1031 
1032 #define test_log(...) \
1033   do {\
1034     if (VerboseInternalVMTests) { \
1035       tty->print_cr(__VA_ARGS__); \
1036       tty->flush(); \
1037     }\
1038   } while (false)
1039 
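     // Internal VM test: reserves spaces with various sizes, alignments and page
     // settings, checks the resulting base and size, touches the memory when it is
     // pre-committed (special), and releases it again.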
1040 class TestReservedSpace : AllStatic {
1041  public:
1042   static void small_page_write(void* addr, size_t size) {
1043     size_t page_size = os::vm_page_size();
1044 
1045     char* end = (char*)addr + size;
1046     for (char* p = (char*)addr; p < end; p += page_size) {
1047       *p = 1;
1048     }
1049   }
1050 
1051   static void release_memory_for_test(ReservedSpace rs) {
1052     if (rs.special()) {
1053       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1054     } else {
1055       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1056     }
1057   }
1058 
1059   static void test_reserved_space1(size_t size, size_t alignment) {
1060     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1061 
1062     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1063 
1064     ReservedSpace rs(size,          // size
1065                      alignment,     // alignment
1066                      UseLargePages, // large
1067                      (char *)NULL); // requested_address
1068 
1069     test_log(" rs.special() == %d", rs.special());
1070 
1071     assert(rs.base() != NULL, "Must be");
1072     assert(rs.size() == size, "Must be");
1073 
1074     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1075     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1076 
1077     if (rs.special()) {
1078       small_page_write(rs.base(), size);
1079     }
1080 
1081     release_memory_for_test(rs);
1082   }
1083 
1084   static void test_reserved_space2(size_t size) {
1085     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1086 
1087     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1088 
1089     ReservedSpace rs(size);
1090 
1091     test_log(" rs.special() == %d", rs.special());
1092 
1093     assert(rs.base() != NULL, "Must be");
1094     assert(rs.size() == size, "Must be");
1095 
1096     if (rs.special()) {
1097       small_page_write(rs.base(), size);
1098     }
1099 
1100     release_memory_for_test(rs);
1101   }
1102 
1103   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1104     test_log("test_reserved_space3(%p, %p, %d)",
1105         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1106 
1107     if (size < alignment) {
1108       // Tests might set -XX:LargePageSizeInBytes=<small pages> and cause unexpected input arguments for this test.
1109       assert((size_t)os::vm_page_size() == os::large_page_size(), "Test needs further refinement");
1110       return;
1111     }
1112 
1113     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1114     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1115 
1116     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1117 
1118     ReservedSpace rs(size, alignment, large, false);
1119 
1120     test_log(" rs.special() == %d", rs.special());
1121 
1122     assert(rs.base() != NULL, "Must be");
1123     assert(rs.size() == size, "Must be");
1124 
1125     if (rs.special()) {
1126       small_page_write(rs.base(), size);
1127     }
1128 
1129     release_memory_for_test(rs);
1130   }
1131 
1132 
1133   static void test_reserved_space1() {
1134     size_t size = 2 * 1024 * 1024;
1135     size_t ag   = os::vm_allocation_granularity();
1136 
1137     test_reserved_space1(size,      ag);
1138     test_reserved_space1(size * 2,  ag);
1139     test_reserved_space1(size * 10, ag);
1140   }
1141 
1142   static void test_reserved_space2() {
1143     size_t size = 2 * 1024 * 1024;
1144     size_t ag = os::vm_allocation_granularity();
1145 
1146     test_reserved_space2(size * 1);
1147     test_reserved_space2(size * 2);
1148     test_reserved_space2(size * 10);
1149     test_reserved_space2(ag);
1150     test_reserved_space2(size - ag);
1151     test_reserved_space2(size);
1152     test_reserved_space2(size + ag);
1153     test_reserved_space2(size * 2);
1154     test_reserved_space2(size * 2 - ag);
1155     test_reserved_space2(size * 2 + ag);
1156     test_reserved_space2(size * 3);
1157     test_reserved_space2(size * 3 - ag);
1158     test_reserved_space2(size * 3 + ag);
1159     test_reserved_space2(size * 10);
1160     test_reserved_space2(size * 10 + size / 2);
1161   }
1162 
1163   static void test_reserved_space3() {
1164     size_t ag = os::vm_allocation_granularity();
1165 
1166     test_reserved_space3(ag,      ag    , false);
1167     test_reserved_space3(ag * 2,  ag    , false);
1168     test_reserved_space3(ag * 3,  ag    , false);
1169     test_reserved_space3(ag * 2,  ag * 2, false);
1170     test_reserved_space3(ag * 4,  ag * 2, false);
1171     test_reserved_space3(ag * 8,  ag * 2, false);
1172     test_reserved_space3(ag * 4,  ag * 4, false);
1173     test_reserved_space3(ag * 8,  ag * 4, false);
1174     test_reserved_space3(ag * 16, ag * 4, false);
1175 
1176     if (UseLargePages) {
1177       size_t lp = os::large_page_size();
1178 
1179       // Without large pages
1180       test_reserved_space3(lp,     ag * 4, false);
1181       test_reserved_space3(lp * 2, ag * 4, false);
1182       test_reserved_space3(lp * 4, ag * 4, false);
1183       test_reserved_space3(lp,     lp    , false);
1184       test_reserved_space3(lp * 2, lp    , false);
1185       test_reserved_space3(lp * 3, lp    , false);
1186       test_reserved_space3(lp * 2, lp * 2, false);
1187       test_reserved_space3(lp * 4, lp * 2, false);
1188       test_reserved_space3(lp * 8, lp * 2, false);
1189 
1190       // With large pages
1191       test_reserved_space3(lp, ag * 4    , true);
1192       test_reserved_space3(lp * 2, ag * 4, true);
1193       test_reserved_space3(lp * 4, ag * 4, true);
1194       test_reserved_space3(lp, lp        , true);
1195       test_reserved_space3(lp * 2, lp    , true);
1196       test_reserved_space3(lp * 3, lp    , true);
1197       test_reserved_space3(lp * 2, lp * 2, true);
1198       test_reserved_space3(lp * 4, lp * 2, true);
1199       test_reserved_space3(lp * 8, lp * 2, true);
1200     }
1201   }
1202 
1203   static void test_reserved_space() {
1204     test_reserved_space1();
1205     test_reserved_space2();
1206     test_reserved_space3();
1207   }
1208 };
1209 
1210 void TestReservedSpace_test() {
1211   TestReservedSpace::test_reserved_space();
1212 }
1213 
1214 #define assert_equals(actual, expected)  \
1215   assert(actual == expected,             \
1216          "Got " SIZE_FORMAT " expected " \
1217          SIZE_FORMAT, actual, expected);
1218 
1219 #define assert_ge(value1, value2)                  \
1220   assert(value1 >= value2,                         \
1221          "'" #value1 "': " SIZE_FORMAT " '"        \
1222          #value2 "': " SIZE_FORMAT, value1, value2);
1223 
1224 #define assert_lt(value1, value2)                  \
1225   assert(value1 < value2,                          \
1226          "'" #value1 "': " SIZE_FORMAT " '"        \
1227          #value2 "': " SIZE_FORMAT, value1, value2);
1228 
1229 
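     // Internal VM test: commits parts of a VirtualSpace under different large-page
     // modes and checks actual_committed_size() against the expected granularity.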
1230 class TestVirtualSpace : AllStatic {
1231   enum TestLargePages {
1232     Default,
1233     Disable,
1234     Reserve,
1235     Commit
1236   };
1237 
1238   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1239     switch(mode) {
1240     default:
1241     case Default:
1242     case Reserve:
1243       return ReservedSpace(reserve_size_aligned);
1244     case Disable:
1245     case Commit:
1246       return ReservedSpace(reserve_size_aligned,
1247                            os::vm_allocation_granularity(),
1248                            /* large */ false, /* exec */ false);
1249     }
1250   }
1251 
1252   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1253     switch(mode) {
1254     default:
1255     case Default:
1256     case Reserve:
1257       return vs.initialize(rs, 0);
1258     case Disable:
1259       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1260     case Commit:
1261       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1262     }
1263   }
1264 
1265  public:
1266   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1267                                                         TestLargePages mode = Default) {
1268     size_t granularity = os::vm_allocation_granularity();
1269     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1270 
1271     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1272 
1273     assert(reserved.is_reserved(), "Must be");
1274 
1275     VirtualSpace vs;
1276     bool initialized = initialize_virtual_space(vs, reserved, mode);
1277     assert(initialized, "Failed to initialize VirtualSpace");
1278 
1279     vs.expand_by(commit_size, false);
1280 
1281     if (vs.special()) {
1282       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1283     } else {
1284       assert_ge(vs.actual_committed_size(), commit_size);
1285       // Approximate the commit granularity.
1286       // Make sure that we don't commit using large pages
1287       // if large pages have been disabled for this VirtualSpace.
1288       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1289                                    os::vm_page_size() : os::large_page_size();
1290       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1291     }
1292 
1293     reserved.release();
1294   }
1295 
1296   static void test_virtual_space_actual_committed_space_one_large_page() {
1297     if (!UseLargePages) {
1298       return;
1299     }
1300 
1301     size_t large_page_size = os::large_page_size();
1302 
1303     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1304 
1305     assert(reserved.is_reserved(), "Must be");
1306 
1307     VirtualSpace vs;
1308     bool initialized = vs.initialize(reserved, 0);
1309     assert(initialized, "Failed to initialize VirtualSpace");
1310 
1311     vs.expand_by(large_page_size, false);
1312 
1313     assert_equals(vs.actual_committed_size(), large_page_size);
1314 
1315     reserved.release();
1316   }
1317 
1318   static void test_virtual_space_actual_committed_space() {
1319     test_virtual_space_actual_committed_space(4 * K, 0);
1320     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1321     test_virtual_space_actual_committed_space(8 * K, 0);
1322     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1323     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1324     test_virtual_space_actual_committed_space(12 * K, 0);
1325     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1326     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1327     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1328     test_virtual_space_actual_committed_space(64 * K, 0);
1329     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1330     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1331     test_virtual_space_actual_committed_space(2 * M, 0);
1332     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1333     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1334     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1335     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1336     test_virtual_space_actual_committed_space(10 * M, 0);
1337     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1338     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1339     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1340     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1341     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1342     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1343   }
1344 
1345   static void test_virtual_space_disable_large_pages() {
1346     if (!UseLargePages) {
1347       return;
1348     }
1349     // These test cases verify that commits use small pages when large pages are disabled for the VirtualSpace.
1350     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1351     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1352     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1353     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1354     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1355     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1356     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1357 
1358     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1359     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1360     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1361     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1362     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1363     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1364     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1365 
1366     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1367     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1368     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1369     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1370     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1371     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1372     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1373   }
1374 
1375   static void test_virtual_space() {
1376     test_virtual_space_actual_committed_space();
1377     test_virtual_space_actual_committed_space_one_large_page();
1378     test_virtual_space_disable_large_pages();
1379   }
1380 };
1381 
1382 void TestVirtualSpace_test() {
1383   TestVirtualSpace::test_virtual_space();
1384 }
1385 
1386 #endif // PRODUCT
1387 
1388 #endif