1 /*
   2  * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "code/codeCacheExtensions.hpp"
  27 #include "memory/virtualspace.hpp"
  28 #include "oops/markOop.hpp"
  29 #include "oops/oop.inline.hpp"
  30 #include "services/memTracker.hpp"
  31 
  32 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  33 
  34 // ReservedSpace
  35 
  36 // Dummy constructor
  37 ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
  38     _alignment(0), _special(false), _executable(false) {
  39 }
  40 
  41 ReservedSpace::ReservedSpace(size_t size, size_t preferred_page_size) {
  42   bool has_preferred_page_size = preferred_page_size != 0;
  43   // Want to use large pages where possible and pad with small pages.
  44   size_t page_size = has_preferred_page_size ? preferred_page_size : os::page_size_for_region_unaligned(size, 1);
  45   bool large_pages = page_size != (size_t)os::vm_page_size();
  46   size_t alignment;
  47   if (large_pages && has_preferred_page_size) {
  48     alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  49     // ReservedSpace initialization requires size to be aligned to the given
  50     // alignment. Align the size up.
  51     size = align_size_up(size, alignment);
  52   } else {
  53     // Don't force the alignment to be large page aligned,
  54     // since that will waste memory.
  55     alignment = os::vm_allocation_granularity();
  56   }
  57   initialize(size, alignment, large_pages, NULL, false);
  58 }
  59 
  60 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  61                              bool large,
  62                              char* requested_address) {
  63   initialize(size, alignment, large, requested_address, false);
  64 }
  65 
  66 ReservedSpace::ReservedSpace(size_t size, size_t alignment,
  67                              bool large,
  68                              bool executable) {
  69   initialize(size, alignment, large, NULL, executable);
  70 }
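
     // A minimal usage sketch (hypothetical caller; sizes are for illustration only):
     //
     //   size_t sz = align_size_up(64*M, os::vm_allocation_granularity());
     //   ReservedSpace rs(sz, os::vm_allocation_granularity(),
     //                    UseLargePages, /*executable*/ false);
     //   if (rs.is_reserved()) {
     //     // hand rs to a VirtualSpace, see VirtualSpace::initialize() below
     //   }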
  71 
  72 // Helper method.
  73 static bool failed_to_reserve_as_requested(char* base, char* requested_address,
  74                                            const size_t size, bool special)
  75 {
  76   if (base == requested_address || requested_address == NULL)
  77     return false; // did not fail
  78 
  79   if (base != NULL) {
  80     // A different reserve address may be acceptable in other cases
  81     // but for compressed oops the heap should be at the requested address.
  82     assert(UseCompressedOops, "currently requested address used only for compressed oops");
  83     if (PrintCompressedOopsMode) {
  84       tty->cr();
  85       tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
  86     }
  87     // OS ignored requested address. Try different address.
  88     if (special) {
  89       if (!os::release_memory_special(base, size)) {
  90         fatal("os::release_memory_special failed");
  91       }
  92     } else {
  93       if (!os::release_memory(base, size)) {
  94         fatal("os::release_memory failed");
  95       }
  96     }
  97   }
  98   return true;
  99 }
 100 
 101 void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
 102                                char* requested_address,
 103                                bool executable) {
 104   const size_t granularity = os::vm_allocation_granularity();
 105   assert((size & (granularity - 1)) == 0,
 106          "size not aligned to os::vm_allocation_granularity()");
 107   assert((alignment & (granularity - 1)) == 0,
 108          "alignment not aligned to os::vm_allocation_granularity()");
 109   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 110          "not a power of 2");
 111 
 112   alignment = MAX2(alignment, (size_t)os::vm_page_size());
 113 
 114   _base = NULL;
 115   _size = 0;
 116   _special = false;
 117   _executable = executable;
 118   _alignment = 0;
 119   _noaccess_prefix = 0;
 120   if (size == 0) {
 121     return;
 122   }
 123 
 124   // If OS doesn't support demand paging for large page memory, we need
 125   // to use reserve_memory_special() to reserve and pin the entire region.
 126   bool special = large && !os::can_commit_large_page_memory();
 127   char* base = NULL;
 128 
 129   if (special) {
 130 
 131     base = os::reserve_memory_special(size, alignment, requested_address, executable);
 132 
 133     if (base != NULL) {
 134       if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
 135         // OS ignored requested address. Try different address.
 136         return;
 137       }
 138       // Check alignment constraints.
 139       assert((uintptr_t) base % alignment == 0,
 140              err_msg("Large pages returned a non-aligned address, base: "
 141                  PTR_FORMAT " alignment: " PTR_FORMAT,
 142                  base, (void*)(uintptr_t)alignment));
 143       _special = true;
 144     } else {
 145       // failed; try to reserve regular memory below
 146       if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 147                             !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 148         if (PrintCompressedOopsMode) {
 149           tty->cr();
 150           tty->print_cr("Reserve regular memory without large pages.");
 151         }
 152       }
 153     }
 154   }
 155 
 156   if (base == NULL) {
 157     // Optimistically assume that the OS returns an aligned base pointer.
 158     // When reserving a large address range, most OSes seem to align to at
 159     // least 64K.
 160 
 161     // If the memory was requested at a particular address, use
 162     // os::attempt_reserve_memory_at() to avoid over mapping something
 163     // important.  If available space is not detected, return NULL.
 164 
 165     if (requested_address != 0) {
 166       base = os::attempt_reserve_memory_at(size, requested_address);
 167       if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
 168         // OS ignored requested address. Try different address.
 169         base = NULL;
 170       }
 171     } else {
 172       base = os::reserve_memory(size, NULL, alignment);
 173     }
 174 
 175     if (base == NULL) return;
 176 
 177     // Check alignment constraints
 178     if ((((size_t)base) & (alignment - 1)) != 0) {
 179       // Base not aligned, retry
 180       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
 181       // Make sure that size is aligned
 182       size = align_size_up(size, alignment);
 183       base = os::reserve_memory_aligned(size, alignment);
 184 
 185       if (requested_address != 0 &&
 186           failed_to_reserve_as_requested(base, requested_address, size, false)) {
 187         // As a result of the alignment constraints, the allocated base differs
 188         // from the requested address. Return back to the caller who can
 189         // take remedial action (like try again without a requested address).
 190         assert(_base == NULL, "should be");
 191         return;
 192       }
 193     }
 194   }
 195   // Done
 196   _base = base;
 197   _size = size;
 198   _alignment = alignment;
 199 }
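     // Note: os::reserve_memory_aligned() used in the retry path above typically
     // over-reserves and trims, so an aligned sub-range of the requested size can
     // be carved out of the raw reservation.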
 200 
 201 
 202 ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
 203                              bool special, bool executable) {
 204   assert((size % os::vm_allocation_granularity()) == 0,
 205          "size not allocation aligned");
 206   _base = base;
 207   _size = size;
 208   _alignment = alignment;
 209   _noaccess_prefix = 0;
 210   _special = special;
 211   _executable = executable;
 212 }
 213 
 214 
 215 ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
 216                                         bool split, bool realloc) {
 217   assert(partition_size <= size(), "partition failed");
 218   if (split) {
 219     os::split_reserved_memory(base(), size(), partition_size, realloc);
 220   }
 221   ReservedSpace result(base(), partition_size, alignment, special(),
 222                        executable());
 223   return result;
 224 }
 225 
 226 
 227 ReservedSpace
 228 ReservedSpace::last_part(size_t partition_size, size_t alignment) {
 229   assert(partition_size <= size(), "partition failed");
 230   ReservedSpace result(base() + partition_size, size() - partition_size,
 231                        alignment, special(), executable());
 232   return result;
 233 }
 234 
 235 
 236 size_t ReservedSpace::page_align_size_up(size_t size) {
 237   return align_size_up(size, os::vm_page_size());
 238 }
 239 
 240 
 241 size_t ReservedSpace::page_align_size_down(size_t size) {
 242   return align_size_down(size, os::vm_page_size());
 243 }
 244 
 245 
 246 size_t ReservedSpace::allocation_align_size_up(size_t size) {
 247   return align_size_up(size, os::vm_allocation_granularity());
 248 }
 249 
 250 
 251 size_t ReservedSpace::allocation_align_size_down(size_t size) {
 252   return align_size_down(size, os::vm_allocation_granularity());
 253 }
 254 
 255 
 256 void ReservedSpace::release() {
 257   if (is_reserved()) {
 258     char *real_base = _base - _noaccess_prefix;
 259     const size_t real_size = _size + _noaccess_prefix;
 260     if (special()) {
 261       os::release_memory_special(real_base, real_size);
 262     } else {
 263       os::release_memory(real_base, real_size);
 264     }
 265     _base = NULL;
 266     _size = 0;
 267     _noaccess_prefix = 0;
 268     _alignment = 0;
 269     _special = false;
 270     _executable = false;
 271   }
 272 }
 273 
 274 static size_t noaccess_prefix_size(size_t alignment) {
 275   return lcm(os::vm_page_size(), alignment);
 276 }
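     // For example, with a 4K page size and a 2M heap alignment the prefix is
     // lcm(4K, 2M) = 2M; with a 4K page size and a 4K alignment it is 4K.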
 277 
 278 void ReservedHeapSpace::establish_noaccess_prefix() {
 279   assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
 280   _noaccess_prefix = noaccess_prefix_size(_alignment);
 281 
 282   if (base() && base() + _size > (char *)OopEncodingHeapMax) {
 283     if (true
 284         WIN64_ONLY(&& !UseLargePages)
 285         AIX_ONLY(&& os::vm_page_size() != 64*K)) {
 286       // Protect memory at the base of the allocated region.
 287       // If special, the page was committed (this only matters on Windows).
 288       if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
 289         fatal("cannot protect protection page");
 290       }
 291       if (PrintCompressedOopsMode) {
 292         tty->cr();
 293         tty->print_cr("Protected page at the reserved heap base: "
 294                       PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
 295       }
 296       assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
 297     } else {
 298       Universe::set_narrow_oop_use_implicit_null_checks(false);
 299     }
 300   }
 301 
 302   _base += _noaccess_prefix;
 303   _size -= _noaccess_prefix;
 304   assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
 305 }
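     // With the prefix in place, decoding a narrow oop of 0 yields an address in
     // the protected page below the heap, so the access traps and the VM can rely
     // on implicit null checks rather than explicit ones.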
 306 
 307 // Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
 308 // Does not check whether the reserved memory actually is at requested_address, as the memory returned
 309 // might still fulfill the wishes of the caller.
 310 // Ensures the memory is aligned to 'alignment'.
 311 // NOTE: If ReservedHeapSpace already points to some reserved memory, this is freed first.
 312 void ReservedHeapSpace::try_reserve_heap(size_t size,
 313                                          size_t alignment,
 314                                          bool large,
 315                                          char* requested_address) {
 316   if (_base != NULL) {
 317     // We tried before, but we didn't like the address delivered.
 318     release();
 319   }
 320 
 321   // If OS doesn't support demand paging for large page memory, we need
 322   // to use reserve_memory_special() to reserve and pin the entire region.
 323   bool special = large && !os::can_commit_large_page_memory();
 324   char* base = NULL;
 325 
 326   if (PrintCompressedOopsMode && Verbose) {
 327     tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
 328                requested_address, (address)size);
 329   }
 330 
 331   if (special) {
 332     base = os::reserve_memory_special(size, alignment, requested_address, false);
 333 
 334     if (base != NULL) {
 335       // Check alignment constraints.
 336       assert((uintptr_t) base % alignment == 0,
 337              err_msg("Large pages returned a non-aligned address, base: "
 338                      PTR_FORMAT " alignment: " PTR_FORMAT,
 339                      base, (void*)(uintptr_t)alignment));
 340       _special = true;
 341     }
 342   }
 343 
 344   if (base == NULL) {
 345     // Failed; try to reserve regular memory below
 346     if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
 347                           !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
 348       if (PrintCompressedOopsMode) {
 349         tty->cr();
 350         tty->print_cr("Reserve regular memory without large pages.");
 351       }
 352     }
 353 
 354     // Optimistically assume that the OS returns an aligned base pointer.
 355     // When reserving a large address range, most OSes seem to align to at
 356     // least 64K.
 357 
 358     // If the memory was requested at a particular address, use
 359     // os::attempt_reserve_memory_at() to avoid over mapping something
 360     // important.  If available space is not detected, return NULL.
 361 
 362     if (requested_address != 0) {
 363       base = os::attempt_reserve_memory_at(size, requested_address);
 364     } else {
 365       base = os::reserve_memory(size, NULL, alignment);
 366     }
 367   }
 368   if (base == NULL) { return; }
 369 
 370   // Done
 371   _base = base;
 372   _size = size;
 373   _alignment = alignment;
 374 
 375   // Check alignment constraints
 376   if ((((size_t)base) & (alignment - 1)) != 0) {
 377     // Base not aligned, retry.
 378     release();
 379   }
 380 }
 381 
 382 void ReservedHeapSpace::try_reserve_range(char *highest_start,
 383                                           char *lowest_start,
 384                                           size_t attach_point_alignment,
 385                                           char *aligned_heap_base_min_address,
 386                                           char *upper_bound,
 387                                           size_t size,
 388                                           size_t alignment,
 389                                           bool large) {
 390   const size_t attach_range = highest_start - lowest_start;
 391   // Cap num_attempts at the number of possible attach points.
 392   // At least one attempt is possible even for a zero-sized attach range.
 393   const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
 394   const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);
 395 
 396   const size_t stepsize = (attach_range == 0) ? // Only one try.
 397     (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);
 398 
 399   // Try attach points from top to bottom.
 400   char* attach_point = highest_start;
 401   while (attach_point >= lowest_start  &&
 402          attach_point <= highest_start &&  // Avoid wrap around.
 403          ((_base == NULL) ||
 404           (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
 405     try_reserve_heap(size, alignment, large, attach_point);
 406     attach_point -= stepsize;
 407   }
 408 }
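     // Example with illustrative values: for lowest_start = 2G, highest_start = 6G,
     // attach_point_alignment = 64K and HeapSearchSteps = 3, stepsize is
     // align_size_up(4G / 3, 64K) and probes are made at roughly 6G, 4.7G and 3.3G,
     // stopping early once a probe yields a base inside the wanted range.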
 409 
 410 #define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
 411 #define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
 412 #define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))
 413 
 414 // Helper for heap allocation. Returns an array with addresses
 415 // (OS-specific) which are suited for disjoint base mode. Array is
 416 // NULL terminated.
 417 static char** get_attach_addresses_for_disjoint_mode() {
 418   static uint64_t addresses[] = {
 419      2 * SIZE_32G,
 420      3 * SIZE_32G,
 421      4 * SIZE_32G,
 422      8 * SIZE_32G,
 423     10 * SIZE_32G,
 424      1 * SIZE_64K * SIZE_32G,
 425      2 * SIZE_64K * SIZE_32G,
 426      3 * SIZE_64K * SIZE_32G,
 427      4 * SIZE_64K * SIZE_32G,
 428     16 * SIZE_64K * SIZE_32G,
 429     32 * SIZE_64K * SIZE_32G,
 430     34 * SIZE_64K * SIZE_32G,
 431     0
 432   };
 433 
 434   // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
 435   // assumes the array is sorted in ascending order.
 436   uint i = 0;
 437   while (addresses[i] != 0 &&
 438          (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
 439     i++;
 440   }
 441   uint start = i;
 442 
 443   // Avoid more steps than requested.
 444   i = 0;
 445   while (addresses[start+i] != 0) {
 446     if (i == HeapSearchSteps) {
 447       addresses[start+i] = 0;
 448       break;
 449     }
 450     i++;
 451   }
 452 
 453   return (char**) &addresses[start];
 454 }
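     // In disjoint base mode the heap base is a multiple of OopEncodingHeapMax, so
     // the base bits and the shifted-oop bits never overlap and the decode can OR
     // the base in instead of performing a full add.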
 455 
 456 void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
 457   guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
 458             "can not allocate compressed oop heap for this size");
 459   guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
 460   assert(HeapBaseMinAddress > 0, "sanity");
 461 
 462   const size_t granularity = os::vm_allocation_granularity();
 463   assert((size & (granularity - 1)) == 0,
 464          "size not aligned to os::vm_allocation_granularity()");
 465   assert((alignment & (granularity - 1)) == 0,
 466          "alignment not aligned to os::vm_allocation_granularity()");
 467   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
 468          "not a power of 2");
 469 
 470   // The necessary attach point alignment for generated wish addresses.
 471   // This is needed to increase the chance of attaching for mmap and shmat.
 472   const size_t os_attach_point_alignment =
 473     AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
 474     NOT_AIX(os::vm_allocation_granularity());
 475   const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);
 476 
 477   char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
 478   size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
 479     noaccess_prefix_size(alignment) : 0;
 480 
 481   // Attempt to alloc at user-given address.
 482   if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
 483     try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
 484     if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
 485       release();
 486     }
 487   }
 488 
 489   // Keep heap at HeapBaseMinAddress.
 490   if (_base == NULL) {
 491 
 492     // Try to allocate the heap at addresses that allow efficient oop compression.
 493     // Different schemes are tried, in order of decreasing optimization potential.
 494     //
 495     // For this, try_reserve_heap() is called with the desired heap base addresses.
 496     // A call into the os layer to allocate at a given address can return memory
 497     // at a different address than requested.  Still, this might be memory at a useful
 498     // address. try_reserve_heap() always keeps the allocated memory, as the criteria
 499     // for a good heap are checked only here.
 500 
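         // The modes, from cheapest to most expensive oop decode, are roughly:
         //   1. unscaled      - heap end <= 4GB,  the narrow oop is the address
         //   2. zero based    - heap end <= 32GB, decode is a shift only
         //   3. disjoint base - heap base aligned to OopEncodingHeapMax, base and
         //                      offset bits do not overlap
         //   4. heap based    - arbitrary base, needs base + shift plus a noaccess
         //                      prefix for implicit null checks
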
 501     // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
 502     // Give it several tries from top of range to bottom.
 503     if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {
 504 
 505       // Calculate the address range within which we try to attach (range of possible start addresses).
 506       char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
 507       char* const lowest_start  = (char *)align_ptr_up  (        aligned_heap_base_min_address             , attach_point_alignment);
 508       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 509                         aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
 510     }
 511 
 512     // zerobased: Attempt to allocate in the lower 32G.
 513     // But leave room for the compressed class pointer space, which is allocated
 514     // above the heap.
 515     char *zerobased_max = (char *)OopEncodingHeapMax;
 516     const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
 517     // For small heaps, save some space for compressed class pointer
 518     // space so it can be decoded with no base.
 519     if (UseCompressedClassPointers && !UseSharedSpaces &&
 520         OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
 521         (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
 522       zerobased_max = (char *)OopEncodingHeapMax - class_space;
 523     }
 524 
 525     // Give it several tries from top of range to bottom.
 526     if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretical possible.
 527         ((_base == NULL) ||                        // No previous try succeeded.
 528          (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.
 529 
 530       // Calculate the address range within which we try to attach (range of possible start addresses).
 531       char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
 532       // Need to be careful about size being guaranteed to be less
 533       // than UnscaledOopHeapMax due to type constraints.
 534       char *lowest_start = aligned_heap_base_min_address;
 535       uint64_t unscaled_end = UnscaledOopHeapMax - size;
 536       if (unscaled_end < UnscaledOopHeapMax) { // unscaled_end wrapped if size is large
 537         lowest_start = MAX2(lowest_start, (char*)unscaled_end);
 538       }
 539       lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
 540       try_reserve_range(highest_start, lowest_start, attach_point_alignment,
 541                         aligned_heap_base_min_address, zerobased_max, size, alignment, large);
 542     }
 543 
 544     // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
 545     // implement null checks.
 546     noaccess_prefix = noaccess_prefix_size(alignment);
 547 
 548     // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
 549     char** addresses = get_attach_addresses_for_disjoint_mode();
 550     int i = 0;
 551     while (addresses[i] &&                                 // End of array not yet reached.
 552            ((_base == NULL) ||                             // No previous try succeeded.
 553             (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
 554              !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
 555       char* const attach_point = addresses[i];
 556       assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
 557       try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
 558       i++;
 559     }
 560 
 561     // Last, desperate try without any placement.
 562     if (_base == NULL) {
 563       if (PrintCompressedOopsMode && Verbose) {
 564         tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
 565       }
 566       initialize(size + noaccess_prefix, alignment, large, NULL, false);
 567     }
 568   }
 569 }
 570 
 571 ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {
 572 
 573   if (size == 0) {
 574     return;
 575   }
 576 
 577   // Heap size should be aligned to alignment, too.
 578   guarantee(is_size_aligned(size, alignment), "set by caller");
 579 
 580   if (UseCompressedOops) {
 581     initialize_compressed_heap(size, alignment, large);
 582     if (_size > size) {
 583       // We allocated heap with noaccess prefix.
 584       // It can happen we get a zerobased/unscaled heap with noaccess prefix,
 585       // if we had to try at arbitrary address.
 586       establish_noaccess_prefix();
 587     }
 588   } else {
 589     initialize(size, alignment, large, NULL, false);
 590   }
 591 
 592   assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
 593          "area must be distinguishable from marks for mark-sweep");
 594   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
 595          "area must be distinguishable from marks for mark-sweep");
 596 
 597   if (base() > 0) {
 598     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
 599   }
 600 }
 601 
 602 // Reserve space for the code segment.  Same as the Java heap, only we mark
 603 // this as executable.
 604 ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
 605                                      size_t rs_align,
 606                                      bool large) :
 607   ReservedSpace(r_size, rs_align, large, /*executable*/ CodeCacheExtensions::support_dynamic_code()) {
 608   MemTracker::record_virtual_memory_type((address)base(), mtCode);
 609 }
 610 
 611 // VirtualSpace
 612 
 613 VirtualSpace::VirtualSpace() {
 614   _low_boundary           = NULL;
 615   _high_boundary          = NULL;
 616   _low                    = NULL;
 617   _high                   = NULL;
 618   _lower_high             = NULL;
 619   _middle_high            = NULL;
 620   _upper_high             = NULL;
 621   _lower_high_boundary    = NULL;
 622   _middle_high_boundary   = NULL;
 623   _upper_high_boundary    = NULL;
 624   _lower_alignment        = 0;
 625   _middle_alignment       = 0;
 626   _upper_alignment        = 0;
 627   _special                = false;
 628   _executable             = false;
 629 }
 630 
 631 
 632 bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 633   const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
 634   return initialize_with_granularity(rs, committed_size, max_commit_granularity);
 635 }
 636 
 637 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
 638   if (!rs.is_reserved()) return false;  // Allocation failed.
 639   assert(_low_boundary == NULL, "VirtualSpace already initialized");
 640   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
 641 
 642   _low_boundary  = rs.base();
 643   _high_boundary = low_boundary() + rs.size();
 644 
 645   _low = low_boundary();
 646   _high = low();
 647 
 648   _special = rs.special();
 649   _executable = rs.executable();
 650 
 651   // When a VirtualSpace begins life at a large size, make all future expansion
 652   // and shrinking occur aligned to a granularity of large pages.  This avoids
 653   // fragmentation of physical addresses that inhibits the use of large pages
 654   // by the OS virtual memory system.  Empirically, we see that with a 4MB
 655   // page size, the only spaces that get handled this way are codecache and
 656   // the heap itself, both of which provide a substantial performance
 657   // boost in many benchmarks when covered by large pages.
 658   //
 659   // No attempt is made to force large page alignment at the very top and
 660   // bottom of the space if they are not aligned so already.
 661   _lower_alignment  = os::vm_page_size();
 662   _middle_alignment = max_commit_granularity;
 663   _upper_alignment  = os::vm_page_size();
 664 
 665   // End of each region
 666   _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
 667   _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
 668   _upper_high_boundary = high_boundary();
 669 
 670   // High address of each region
 671   _lower_high = low_boundary();
 672   _middle_high = lower_high_boundary();
 673   _upper_high = middle_high_boundary();
 674 
 675   // commit to initial size
 676   if (committed_size > 0) {
 677     if (!expand_by(committed_size)) {
 678       return false;
 679     }
 680   }
 681   return true;
 682 }
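     // Illustrative layout of the three commit regions (not to scale):
     //
     //   low_boundary . lower_high_boundary ... middle_high_boundary . high_boundary
     //   [---- lower ----][--------------- middle ---------------][---- upper ----]
     //    small page align      middle_alignment (large pages)     small page align
     //
     // _lower_high, _middle_high and _upper_high track how far each region has been
     // committed; high() is the overall committed watermark.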
 683 
 684 
 685 VirtualSpace::~VirtualSpace() {
 686   release();
 687 }
 688 
 689 
 690 void VirtualSpace::release() {
 691   // This does not release the underlying memory; VirtualSpace never reserved it.
 692   // The caller must release it via rs.release();
 693   _low_boundary           = NULL;
 694   _high_boundary          = NULL;
 695   _low                    = NULL;
 696   _high                   = NULL;
 697   _lower_high             = NULL;
 698   _middle_high            = NULL;
 699   _upper_high             = NULL;
 700   _lower_high_boundary    = NULL;
 701   _middle_high_boundary   = NULL;
 702   _upper_high_boundary    = NULL;
 703   _lower_alignment        = 0;
 704   _middle_alignment       = 0;
 705   _upper_alignment        = 0;
 706   _special                = false;
 707   _executable             = false;
 708 }
 709 
 710 
 711 size_t VirtualSpace::committed_size() const {
 712   return pointer_delta(high(), low(), sizeof(char));
 713 }
 714 
 715 
 716 size_t VirtualSpace::reserved_size() const {
 717   return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
 718 }
 719 
 720 
 721 size_t VirtualSpace::uncommitted_size()  const {
 722   return reserved_size() - committed_size();
 723 }
 724 
 725 size_t VirtualSpace::actual_committed_size() const {
 726   // Special VirtualSpaces commit all reserved space up front.
 727   if (special()) {
 728     return reserved_size();
 729   }
 730 
 731   size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
 732   size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
 733   size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
 734 
 735 #ifdef ASSERT
 736   size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
 737   size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
 738   size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
 739 
 740   if (committed_high > 0) {
 741     assert(committed_low == lower, "Must be");
 742     assert(committed_middle == middle, "Must be");
 743   }
 744 
 745   if (committed_middle > 0) {
 746     assert(committed_low == lower, "Must be");
 747   }
 748   if (committed_middle < middle) {
 749     assert(committed_high == 0, "Must be");
 750   }
 751 
 752   if (committed_low < lower) {
 753     assert(committed_high == 0, "Must be");
 754     assert(committed_middle == 0, "Must be");
 755   }
 756 #endif
 757 
 758   return committed_low + committed_middle + committed_high;
 759 }
 760 
 761 
 762 bool VirtualSpace::contains(const void* p) const {
 763   return low() <= (const char*) p && (const char*) p < high();
 764 }
 765 
 766 /*
 767    First we need to determine if a particular virtual space is using large
 768    pages.  This is done at the initialize function and only virtual spaces
 769    that are larger than LargePageSizeInBytes use large pages.  Once we
 770    have determined this, all expand_by and shrink_by calls must grow and
 771    shrink by large page size chunks.  If a particular request
 772    is within the current large page, the call to commit and uncommit memory
 773    can be ignored.  In the case that the low and high boundaries of this
 774    space are not large page aligned, the pages leading to the first large
 775    page address and the pages after the last large page address must be
 776    allocated with default pages.
 777 */
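     /*
        A sketch with illustrative numbers, assuming a 4K small page size and a 2M
        middle alignment: for a space reserved at [0x1f0000, 0x800000) the lower
        region is [0x1f0000, 0x200000), the middle region is [0x200000, 0x800000) and
        the upper region is empty.  A first expand_by(0x12000) then commits the 64K
        lower region with small pages and the first 2M of the middle region with large
        pages, leaving high() at 0x202000.
     */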
 778 bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
 779   if (uncommitted_size() < bytes) return false;
 780 
 781   if (special()) {
 782     // don't commit memory if the entire space is pinned in memory
 783     _high += bytes;
 784     return true;
 785   }
 786 
 787   char* previous_high = high();
 788   char* unaligned_new_high = high() + bytes;
 789   assert(unaligned_new_high <= high_boundary(),
 790          "cannot expand by more than upper boundary");
 791 
 792   // Calculate where the new high for each of the regions should be.  If
 793   // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
 794   // then the unaligned lower and upper new highs would be the
 795   // lower_high() and upper_high() respectively.
 796   char* unaligned_lower_new_high =
 797     MIN2(unaligned_new_high, lower_high_boundary());
 798   char* unaligned_middle_new_high =
 799     MIN2(unaligned_new_high, middle_high_boundary());
 800   char* unaligned_upper_new_high =
 801     MIN2(unaligned_new_high, upper_high_boundary());
 802 
 803   // Align the new highs based on the regions' alignment.  lower and upper
 804   // alignment will always be default page size.  middle alignment will be
 805   // LargePageSizeInBytes if the actual size of the virtual space is in
 806   // fact larger than LargePageSizeInBytes.
 807   char* aligned_lower_new_high =
 808     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 809   char* aligned_middle_new_high =
 810     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 811   char* aligned_upper_new_high =
 812     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 813 
 814   // Determine which regions need to grow in this expand_by call.
 815   // If you are growing in the lower region, high() must be in that
 816   // region so calculate the size based on high().  For the middle and
 817   // upper regions, determine the starting point of growth based on the
 818   // location of high().  By getting the MAX of the region's low address
 819   // (or the previous region's high address) and high(), we can tell if it
 820   // is an intra or inter region growth.
 821   size_t lower_needs = 0;
 822   if (aligned_lower_new_high > lower_high()) {
 823     lower_needs =
 824       pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
 825   }
 826   size_t middle_needs = 0;
 827   if (aligned_middle_new_high > middle_high()) {
 828     middle_needs =
 829       pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
 830   }
 831   size_t upper_needs = 0;
 832   if (aligned_upper_new_high > upper_high()) {
 833     upper_needs =
 834       pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
 835   }
 836 
 837   // Check contiguity.
 838   assert(low_boundary() <= lower_high() &&
 839          lower_high() <= lower_high_boundary(),
 840          "high address must be contained within the region");
 841   assert(lower_high_boundary() <= middle_high() &&
 842          middle_high() <= middle_high_boundary(),
 843          "high address must be contained within the region");
 844   assert(middle_high_boundary() <= upper_high() &&
 845          upper_high() <= upper_high_boundary(),
 846          "high address must be contained within the region");
 847 
 848   // Commit regions
 849   if (lower_needs > 0) {
 850     assert(low_boundary() <= lower_high() &&
 851            lower_high() + lower_needs <= lower_high_boundary(),
 852            "must not expand beyond region");
 853     if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
 854       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 855                          ", lower_needs=" SIZE_FORMAT ", %d) failed",
 856                          lower_high(), lower_needs, _executable);)
 857       return false;
 858     } else {
 859       _lower_high += lower_needs;
 860     }
 861   }
 862   if (middle_needs > 0) {
 863     assert(lower_high_boundary() <= middle_high() &&
 864            middle_high() + middle_needs <= middle_high_boundary(),
 865            "must not expand beyond region");
 866     if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
 867                            _executable)) {
 868       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 869                          ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
 870                          ", %d) failed", middle_high(), middle_needs,
 871                          middle_alignment(), _executable);)
 872       return false;
 873     }
 874     _middle_high += middle_needs;
 875   }
 876   if (upper_needs > 0) {
 877     assert(middle_high_boundary() <= upper_high() &&
 878            upper_high() + upper_needs <= upper_high_boundary(),
 879            "must not expand beyond region");
 880     if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
 881       debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
 882                          ", upper_needs=" SIZE_FORMAT ", %d) failed",
 883                          upper_high(), upper_needs, _executable);)
 884       return false;
 885     } else {
 886       _upper_high += upper_needs;
 887     }
 888   }
 889 
 890   if (pre_touch || AlwaysPreTouch) {
 891     os::pretouch_memory(previous_high, unaligned_new_high);
 892   }
 893 
 894   _high += bytes;
 895   return true;
 896 }
 897 
 898 // A page is uncommitted if the entire contents of the page are deemed unusable.
 899 // Continue to decrement the high() pointer until it reaches a page boundary,
 900 // at which point that particular page can be uncommitted.
 901 void VirtualSpace::shrink_by(size_t size) {
 902   if (committed_size() < size)
 903     fatal("Cannot shrink virtual space to negative size");
 904 
 905   if (special()) {
 906     // don't uncommit if the entire space is pinned in memory
 907     _high -= size;
 908     return;
 909   }
 910 
 911   char* unaligned_new_high = high() - size;
 912   assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");
 913 
 914   // Calculate new unaligned address
 915   char* unaligned_upper_new_high =
 916     MAX2(unaligned_new_high, middle_high_boundary());
 917   char* unaligned_middle_new_high =
 918     MAX2(unaligned_new_high, lower_high_boundary());
 919   char* unaligned_lower_new_high =
 920     MAX2(unaligned_new_high, low_boundary());
 921 
 922   // Align address to region's alignment
 923   char* aligned_upper_new_high =
 924     (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
 925   char* aligned_middle_new_high =
 926     (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
 927   char* aligned_lower_new_high =
 928     (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
 929 
 930   // Determine which regions need to shrink
 931   size_t upper_needs = 0;
 932   if (aligned_upper_new_high < upper_high()) {
 933     upper_needs =
 934       pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
 935   }
 936   size_t middle_needs = 0;
 937   if (aligned_middle_new_high < middle_high()) {
 938     middle_needs =
 939       pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
 940   }
 941   size_t lower_needs = 0;
 942   if (aligned_lower_new_high < lower_high()) {
 943     lower_needs =
 944       pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
 945   }
 946 
 947   // Check contiguity.
 948   assert(middle_high_boundary() <= upper_high() &&
 949          upper_high() <= upper_high_boundary(),
 950          "high address must be contained within the region");
 951   assert(lower_high_boundary() <= middle_high() &&
 952          middle_high() <= middle_high_boundary(),
 953          "high address must be contained within the region");
 954   assert(low_boundary() <= lower_high() &&
 955          lower_high() <= lower_high_boundary(),
 956          "high address must be contained within the region");
 957 
 958   // Uncommit
 959   if (upper_needs > 0) {
 960     assert(middle_high_boundary() <= aligned_upper_new_high &&
 961            aligned_upper_new_high + upper_needs <= upper_high_boundary(),
 962            "must not shrink beyond region");
 963     if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
 964       debug_only(warning("os::uncommit_memory failed"));
 965       return;
 966     } else {
 967       _upper_high -= upper_needs;
 968     }
 969   }
 970   if (middle_needs > 0) {
 971     assert(lower_high_boundary() <= aligned_middle_new_high &&
 972            aligned_middle_new_high + middle_needs <= middle_high_boundary(),
 973            "must not shrink beyond region");
 974     if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
 975       debug_only(warning("os::uncommit_memory failed"));
 976       return;
 977     } else {
 978       _middle_high -= middle_needs;
 979     }
 980   }
 981   if (lower_needs > 0) {
 982     assert(low_boundary() <= aligned_lower_new_high &&
 983            aligned_lower_new_high + lower_needs <= lower_high_boundary(),
 984            "must not shrink beyond region");
 985     if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
 986       debug_only(warning("os::uncommit_memory failed"));
 987       return;
 988     } else {
 989       _lower_high -= lower_needs;
 990     }
 991   }
 992 
 993   _high -= size;
 994 }
 995 
 996 #ifndef PRODUCT
 997 void VirtualSpace::check_for_contiguity() {
 998   // Check contiguity.
 999   assert(low_boundary() <= lower_high() &&
1000          lower_high() <= lower_high_boundary(),
1001          "high address must be contained within the region");
1002   assert(lower_high_boundary() <= middle_high() &&
1003          middle_high() <= middle_high_boundary(),
1004          "high address must be contained within the region");
1005   assert(middle_high_boundary() <= upper_high() &&
1006          upper_high() <= upper_high_boundary(),
1007          "high address must be contained within the region");
1008   assert(low() >= low_boundary(), "low");
1009   assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
1010   assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
1011   assert(high() <= upper_high(), "upper high");
1012 }
1013 
1014 void VirtualSpace::print_on(outputStream* out) {
1015   out->print   ("Virtual space:");
1016   if (special()) out->print(" (pinned in memory)");
1017   out->cr();
1018   out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
1019   out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
1020   out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
1021   out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
1022 }
1023 
1024 void VirtualSpace::print() {
1025   print_on(tty);
1026 }
1027 
1028 /////////////// Unit tests ///////////////
1029 
1030 #ifndef PRODUCT
1031 
1032 #define test_log(...) \
1033   do {\
1034     if (VerboseInternalVMTests) { \
1035       tty->print_cr(__VA_ARGS__); \
1036       tty->flush(); \
1037     }\
1038   } while (false)
1039 
1040 class TestReservedSpace : AllStatic {
1041  public:
1042   static void small_page_write(void* addr, size_t size) {
1043     size_t page_size = os::vm_page_size();
1044 
1045     char* end = (char*)addr + size;
1046     for (char* p = (char*)addr; p < end; p += page_size) {
1047       *p = 1;
1048     }
1049   }
1050 
1051   static void release_memory_for_test(ReservedSpace rs) {
1052     if (rs.special()) {
1053       guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
1054     } else {
1055       guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
1056     }
1057   }
1058 
1059   static void test_reserved_space1(size_t size, size_t alignment) {
1060     test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);
1061 
1062     assert(is_size_aligned(size, alignment), "Incorrect input parameters");
1063 
1064     ReservedSpace rs(size,          // size
1065                      alignment,     // alignment
1066                      UseLargePages, // large
1067                      (char *)NULL); // requested_address
1068 
1069     test_log(" rs.special() == %d", rs.special());
1070 
1071     assert(rs.base() != NULL, "Must be");
1072     assert(rs.size() == size, "Must be");
1073 
1074     assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
1075     assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");
1076 
1077     if (rs.special()) {
1078       small_page_write(rs.base(), size);
1079     }
1080 
1081     release_memory_for_test(rs);
1082   }
1083 
1084   static void test_reserved_space2(size_t size) {
1085     test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);
1086 
1087     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1088 
1089     ReservedSpace rs(size);
1090 
1091     test_log(" rs.special() == %d", rs.special());
1092 
1093     assert(rs.base() != NULL, "Must be");
1094     assert(rs.size() == size, "Must be");
1095 
1096     if (rs.special()) {
1097       small_page_write(rs.base(), size);
1098     }
1099 
1100     release_memory_for_test(rs);
1101   }
1102 
1103   static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
1104     test_log("test_reserved_space3(%p, %p, %d)",
1105         (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);
1106 
1107     assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
1108     assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");
1109 
1110     bool large = maybe_large && UseLargePages && size >= os::large_page_size();
1111 
1112     ReservedSpace rs(size, alignment, large, false);
1113 
1114     test_log(" rs.special() == %d", rs.special());
1115 
1116     assert(rs.base() != NULL, "Must be");
1117     assert(rs.size() == size, "Must be");
1118 
1119     if (rs.special()) {
1120       small_page_write(rs.base(), size);
1121     }
1122 
1123     release_memory_for_test(rs);
1124   }
1125 
1126 
1127   static void test_reserved_space1() {
1128     size_t size = 2 * 1024 * 1024;
1129     size_t ag   = os::vm_allocation_granularity();
1130 
1131     test_reserved_space1(size,      ag);
1132     test_reserved_space1(size * 2,  ag);
1133     test_reserved_space1(size * 10, ag);
1134   }
1135 
1136   static void test_reserved_space2() {
1137     size_t size = 2 * 1024 * 1024;
1138     size_t ag = os::vm_allocation_granularity();
1139 
1140     test_reserved_space2(size * 1);
1141     test_reserved_space2(size * 2);
1142     test_reserved_space2(size * 10);
1143     test_reserved_space2(ag);
1144     test_reserved_space2(size - ag);
1145     test_reserved_space2(size);
1146     test_reserved_space2(size + ag);
1147     test_reserved_space2(size * 2);
1148     test_reserved_space2(size * 2 - ag);
1149     test_reserved_space2(size * 2 + ag);
1150     test_reserved_space2(size * 3);
1151     test_reserved_space2(size * 3 - ag);
1152     test_reserved_space2(size * 3 + ag);
1153     test_reserved_space2(size * 10);
1154     test_reserved_space2(size * 10 + size / 2);
1155   }
1156 
1157   static void test_reserved_space3() {
1158     size_t ag = os::vm_allocation_granularity();
1159 
1160     test_reserved_space3(ag,      ag    , false);
1161     test_reserved_space3(ag * 2,  ag    , false);
1162     test_reserved_space3(ag * 3,  ag    , false);
1163     test_reserved_space3(ag * 2,  ag * 2, false);
1164     test_reserved_space3(ag * 4,  ag * 2, false);
1165     test_reserved_space3(ag * 8,  ag * 2, false);
1166     test_reserved_space3(ag * 4,  ag * 4, false);
1167     test_reserved_space3(ag * 8,  ag * 4, false);
1168     test_reserved_space3(ag * 16, ag * 4, false);
1169 
1170     if (UseLargePages) {
1171       size_t lp = os::large_page_size();
1172 
1173       // Without large pages
1174       test_reserved_space3(lp,     ag * 4, false);
1175       test_reserved_space3(lp * 2, ag * 4, false);
1176       test_reserved_space3(lp * 4, ag * 4, false);
1177       test_reserved_space3(lp,     lp    , false);
1178       test_reserved_space3(lp * 2, lp    , false);
1179       test_reserved_space3(lp * 3, lp    , false);
1180       test_reserved_space3(lp * 2, lp * 2, false);
1181       test_reserved_space3(lp * 4, lp * 2, false);
1182       test_reserved_space3(lp * 8, lp * 2, false);
1183 
1184       // With large pages
1185       test_reserved_space3(lp, ag * 4    , true);
1186       test_reserved_space3(lp * 2, ag * 4, true);
1187       test_reserved_space3(lp * 4, ag * 4, true);
1188       test_reserved_space3(lp, lp        , true);
1189       test_reserved_space3(lp * 2, lp    , true);
1190       test_reserved_space3(lp * 3, lp    , true);
1191       test_reserved_space3(lp * 2, lp * 2, true);
1192       test_reserved_space3(lp * 4, lp * 2, true);
1193       test_reserved_space3(lp * 8, lp * 2, true);
1194     }
1195   }
1196 
1197   static void test_reserved_space() {
1198     test_reserved_space1();
1199     test_reserved_space2();
1200     test_reserved_space3();
1201   }
1202 };
1203 
1204 void TestReservedSpace_test() {
1205   TestReservedSpace::test_reserved_space();
1206 }
1207 
1208 #define assert_equals(actual, expected)     \
1209   assert(actual == expected,                \
1210     err_msg("Got " SIZE_FORMAT " expected " \
1211       SIZE_FORMAT, actual, expected));
1212 
1213 #define assert_ge(value1, value2)                  \
1214   assert(value1 >= value2,                         \
1215     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1216       #value2 "': " SIZE_FORMAT, value1, value2));
1217 
1218 #define assert_lt(value1, value2)                  \
1219   assert(value1 < value2,                          \
1220     err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
1221       #value2 "': " SIZE_FORMAT, value1, value2));
1222 
1223 
1224 class TestVirtualSpace : AllStatic {
1225   enum TestLargePages {
1226     Default,
1227     Disable,
1228     Reserve,
1229     Commit
1230   };
1231 
1232   static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
1233     switch(mode) {
1234     default:
1235     case Default:
1236     case Reserve:
1237       return ReservedSpace(reserve_size_aligned);
1238     case Disable:
1239     case Commit:
1240       return ReservedSpace(reserve_size_aligned,
1241                            os::vm_allocation_granularity(),
1242                            /* large */ false, /* exec */ false);
1243     }
1244   }
1245 
1246   static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
1247     switch(mode) {
1248     default:
1249     case Default:
1250     case Reserve:
1251       return vs.initialize(rs, 0);
1252     case Disable:
1253       return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
1254     case Commit:
1255       return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
1256     }
1257   }
1258 
1259  public:
1260   static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
1261                                                         TestLargePages mode = Default) {
1262     size_t granularity = os::vm_allocation_granularity();
1263     size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
1264 
1265     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
1266 
1267     assert(reserved.is_reserved(), "Must be");
1268 
1269     VirtualSpace vs;
1270     bool initialized = initialize_virtual_space(vs, reserved, mode);
1271     assert(initialized, "Failed to initialize VirtualSpace");
1272 
1273     vs.expand_by(commit_size, false);
1274 
1275     if (vs.special()) {
1276       assert_equals(vs.actual_committed_size(), reserve_size_aligned);
1277     } else {
1278       assert_ge(vs.actual_committed_size(), commit_size);
1279       // Approximate the commit granularity.
1280       // Make sure that we don't commit using large pages
1281       // if large pages have been disabled for this VirtualSpace.
1282       size_t commit_granularity = (mode == Disable || !UseLargePages) ?
1283                                    os::vm_page_size() : os::large_page_size();
1284       assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
1285     }
1286 
1287     reserved.release();
1288   }
1289 
1290   static void test_virtual_space_actual_committed_space_one_large_page() {
1291     if (!UseLargePages) {
1292       return;
1293     }
1294 
1295     size_t large_page_size = os::large_page_size();
1296 
1297     ReservedSpace reserved(large_page_size, large_page_size, true, false);
1298 
1299     assert(reserved.is_reserved(), "Must be");
1300 
1301     VirtualSpace vs;
1302     bool initialized = vs.initialize(reserved, 0);
1303     assert(initialized, "Failed to initialize VirtualSpace");
1304 
1305     vs.expand_by(large_page_size, false);
1306 
1307     assert_equals(vs.actual_committed_size(), large_page_size);
1308 
1309     reserved.release();
1310   }
1311 
1312   static void test_virtual_space_actual_committed_space() {
1313     test_virtual_space_actual_committed_space(4 * K, 0);
1314     test_virtual_space_actual_committed_space(4 * K, 4 * K);
1315     test_virtual_space_actual_committed_space(8 * K, 0);
1316     test_virtual_space_actual_committed_space(8 * K, 4 * K);
1317     test_virtual_space_actual_committed_space(8 * K, 8 * K);
1318     test_virtual_space_actual_committed_space(12 * K, 0);
1319     test_virtual_space_actual_committed_space(12 * K, 4 * K);
1320     test_virtual_space_actual_committed_space(12 * K, 8 * K);
1321     test_virtual_space_actual_committed_space(12 * K, 12 * K);
1322     test_virtual_space_actual_committed_space(64 * K, 0);
1323     test_virtual_space_actual_committed_space(64 * K, 32 * K);
1324     test_virtual_space_actual_committed_space(64 * K, 64 * K);
1325     test_virtual_space_actual_committed_space(2 * M, 0);
1326     test_virtual_space_actual_committed_space(2 * M, 4 * K);
1327     test_virtual_space_actual_committed_space(2 * M, 64 * K);
1328     test_virtual_space_actual_committed_space(2 * M, 1 * M);
1329     test_virtual_space_actual_committed_space(2 * M, 2 * M);
1330     test_virtual_space_actual_committed_space(10 * M, 0);
1331     test_virtual_space_actual_committed_space(10 * M, 4 * K);
1332     test_virtual_space_actual_committed_space(10 * M, 8 * K);
1333     test_virtual_space_actual_committed_space(10 * M, 1 * M);
1334     test_virtual_space_actual_committed_space(10 * M, 2 * M);
1335     test_virtual_space_actual_committed_space(10 * M, 5 * M);
1336     test_virtual_space_actual_committed_space(10 * M, 10 * M);
1337   }
1338 
1339   static void test_virtual_space_disable_large_pages() {
1340     if (!UseLargePages) {
1341       return;
1342     }
1343     // These test cases verify committed-size tracking when we force VirtualSpace to disable large pages.
1344     test_virtual_space_actual_committed_space(10 * M, 0, Disable);
1345     test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
1346     test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
1347     test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
1348     test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
1349     test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
1350     test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
1351 
1352     test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
1353     test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
1354     test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
1355     test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
1356     test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
1357     test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
1358     test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
1359 
1360     test_virtual_space_actual_committed_space(10 * M, 0, Commit);
1361     test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
1362     test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
1363     test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
1364     test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
1365     test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
1366     test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
1367   }
1368 
1369   static void test_virtual_space() {
1370     test_virtual_space_actual_committed_space();
1371     test_virtual_space_actual_committed_space_one_large_page();
1372     test_virtual_space_disable_large_pages();
1373   }
1374 };
1375 
1376 void TestVirtualSpace_test() {
1377   TestVirtualSpace::test_virtual_space();
1378 }
1379 
1380 #endif // PRODUCT
1381 
1382 #endif