/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/virtualspace.hpp"
#include "services/memTracker.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// ReservedSpace

// Dummy constructor
ReservedSpace::ReservedSpace() : _base(NULL), _size(0), _noaccess_prefix(0),
    _alignment(0), _special(false), _executable(false) {
}

ReservedSpace::ReservedSpace(size_t size, bool prefer_large_pages) {
  // Want to use large pages where possible and pad with small pages.
  size_t page_size = os::page_size_for_region_unaligned(size, 1);
  bool large_pages = page_size != (size_t)os::vm_page_size();
  size_t alignment;
  if (large_pages && prefer_large_pages) {
    alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
    // ReservedSpace initialization requires size to be aligned to the given
    // alignment. Align the size up.
    size = align_size_up(size, alignment);
  } else {
    // Don't force the alignment to be large page aligned,
    // since that will waste memory.
    alignment = os::vm_allocation_granularity();
  }
  initialize(size, alignment, large_pages, NULL, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             char* requested_address) {
  initialize(size, alignment, large, requested_address, false);
}

ReservedSpace::ReservedSpace(size_t size, size_t alignment,
                             bool large,
                             bool executable) {
  initialize(size, alignment, large, NULL, executable);
}

// Helper method: returns true if the reservation did not come back at the
// requested address, releasing the mismatched memory so the caller can retry.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
                                           const size_t size, bool special)
{
  if (base == requested_address || requested_address == NULL)
    return false; // did not fail

  if (base != NULL) {
    // A different reserve address may be acceptable in other cases
    // but for compressed oops the heap should be at the requested address.
    assert(UseCompressedOops, "currently requested address used only for compressed oops");
    if (PrintCompressedOopsMode) {
      tty->cr();
      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
    }
    // OS ignored requested address. Try different address.
    if (special) {
      if (!os::release_memory_special(base, size)) {
        fatal("os::release_memory_special failed");
      }
    } else {
      if (!os::release_memory(base, size)) {
        fatal("os::release_memory failed");
      }
    }
  }
  return true;
}

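// Reserve 'size' bytes of address space with the given alignment. If 'large'
// is set and the OS cannot commit large pages on demand, the region is
// reserved and pinned up front with os::reserve_memory_special(). On failure
// _base is left NULL and the ReservedSpace stays unreserved.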
void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
                               char* requested_address,
                               bool executable) {
  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  alignment = MAX2(alignment, (size_t)os::vm_page_size());

  _base = NULL;
  _size = 0;
  _special = false;
  _executable = executable;
  _alignment = 0;
  _noaccess_prefix = 0;
  if (size == 0) {
    return;
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (special) {

    base = os::reserve_memory_special(size, alignment, requested_address, executable);

    if (base != NULL) {
      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
        // OS ignored requested address. Try different address.
        return;
      }
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                 PTR_FORMAT " alignment: " PTR_FORMAT,
                 base, (void*)(uintptr_t)alignment));
      _special = true;
    } else {
      // failed; try to reserve regular memory below
      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
        if (PrintCompressedOopsMode) {
          tty->cr();
          tty->print_cr("Reserve regular memory without large pages.");
        }
      }
    }
  }

  if (base == NULL) {
    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over-mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // OS ignored requested address. Try different address.
        base = NULL;
      }
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }

    if (base == NULL) return;

    // Check alignment constraints
    if ((((size_t)base) & (alignment - 1)) != 0) {
      // Base not aligned, retry
      if (!os::release_memory(base, size)) fatal("os::release_memory failed");
      // Make sure that size is aligned
      size = align_size_up(size, alignment);
      base = os::reserve_memory_aligned(size, alignment);

      if (requested_address != 0 &&
          failed_to_reserve_as_requested(base, requested_address, size, false)) {
        // As a result of the alignment constraints, the allocated base differs
        // from the requested address. Return back to the caller who can
        // take remedial action (like try again without a requested address).
        assert(_base == NULL, "should be");
        return;
      }
    }
  }
  // Done
  _base = base;
  _size = size;
  _alignment = alignment;
}


ReservedSpace::ReservedSpace(char* base, size_t size, size_t alignment,
                             bool special, bool executable) {
  assert((size % os::vm_allocation_granularity()) == 0,
         "size not allocation aligned");
  _base = base;
  _size = size;
  _alignment = alignment;
  _noaccess_prefix = 0;
  _special = special;
  _executable = executable;
}


ReservedSpace ReservedSpace::first_part(size_t partition_size, size_t alignment,
                                        bool split, bool realloc) {
  assert(partition_size <= size(), "partition failed");
  if (split) {
    os::split_reserved_memory(base(), size(), partition_size, realloc);
  }
  ReservedSpace result(base(), partition_size, alignment, special(),
                       executable());
  return result;
}


ReservedSpace
ReservedSpace::last_part(size_t partition_size, size_t alignment) {
  assert(partition_size <= size(), "partition failed");
  ReservedSpace result(base() + partition_size, size() - partition_size,
                       alignment, special(), executable());
  return result;
}


size_t ReservedSpace::page_align_size_up(size_t size) {
  return align_size_up(size, os::vm_page_size());
}


size_t ReservedSpace::page_align_size_down(size_t size) {
  return align_size_down(size, os::vm_page_size());
}


size_t ReservedSpace::allocation_align_size_up(size_t size) {
  return align_size_up(size, os::vm_allocation_granularity());
}


size_t ReservedSpace::allocation_align_size_down(size_t size) {
  return align_size_down(size, os::vm_allocation_granularity());
}


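// Release the reserved range, including any noaccess prefix, back to the OS
// and reset all fields to their unreserved state.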
void ReservedSpace::release() {
  if (is_reserved()) {
    char *real_base = _base - _noaccess_prefix;
    const size_t real_size = _size + _noaccess_prefix;
    if (special()) {
      os::release_memory_special(real_base, real_size);
    } else {
      os::release_memory(real_base, real_size);
    }
    _base = NULL;
    _size = 0;
    _noaccess_prefix = 0;
    _alignment = 0;
    _special = false;
    _executable = false;
  }
}

static size_t noaccess_prefix_size(size_t alignment) {
  return lcm(os::vm_page_size(), alignment);
}

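// Protect an inaccessible prefix at the bottom of the reservation so that a
// compressed oop of 0 decodes to a protected address. Loads through such a
// NULL oop then trap and can be handled as implicit null checks.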
void ReservedHeapSpace::establish_noaccess_prefix() {
  assert(_alignment >= (size_t)os::vm_page_size(), "must be at least page size big");
  _noaccess_prefix = noaccess_prefix_size(_alignment);

  if (base() && base() + _size > (char *)OopEncodingHeapMax) {
    if (true
        WIN64_ONLY(&& !UseLargePages)
        AIX_ONLY(&& os::vm_page_size() != SIZE_64K)) {
      // Protect memory at the base of the allocated region.
      // If special, the page was committed (only matters on windows)
      if (!os::protect_memory(_base, _noaccess_prefix, os::MEM_PROT_NONE, _special)) {
        fatal("cannot protect protection page");
      }
      if (PrintCompressedOopsMode) {
        tty->cr();
        tty->print_cr("Protected page at the reserved heap base: "
                      PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
      }
      assert(Universe::narrow_oop_use_implicit_null_checks() == true, "not initialized?");
    } else {
      Universe::set_narrow_oop_use_implicit_null_checks(false);
    }
  }

  _base += _noaccess_prefix;
  _size -= _noaccess_prefix;
  assert(((uintptr_t)_base % _alignment == 0), "must be exactly of required alignment");
}

// Tries to allocate memory of size 'size' at address requested_address with alignment 'alignment'.
// Does not check whether the reserved memory actually is at requested_address, as the memory returned
// might still fulfill the wishes of the caller.
// Assures the memory is aligned to 'alignment'.
// NOTE: If this ReservedHeapSpace already points to some reserved memory, that memory is freed first.
void ReservedHeapSpace::try_reserve_heap(size_t size,
                                         size_t alignment,
                                         bool large,
                                         char* requested_address) {
  if (_base != NULL) {
    // We tried before, but we didn't like the address delivered.
    release();
  }

  // If OS doesn't support demand paging for large page memory, we need
  // to use reserve_memory_special() to reserve and pin the entire region.
  bool special = large && !os::can_commit_large_page_memory();
  char* base = NULL;

  if (PrintCompressedOopsMode && Verbose) {
    tty->print("Trying to allocate at address " PTR_FORMAT " heap of size " PTR_FORMAT ".\n",
               requested_address, (address)size);
  }

  if (special) {
    base = os::reserve_memory_special(size, alignment, requested_address, false);

    if (base != NULL) {
      // Check alignment constraints.
      assert((uintptr_t) base % alignment == 0,
             err_msg("Large pages returned a non-aligned address, base: "
                     PTR_FORMAT " alignment: " PTR_FORMAT,
                     base, (void*)(uintptr_t)alignment));
      _special = true;
    }
  }

  if (base == NULL) {
    // Failed; try to reserve regular memory below
    if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
                          !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
      if (PrintCompressedOopsMode) {
        tty->cr();
        tty->print_cr("Reserve regular memory without large pages.");
      }
    }

    // Optimistically assume that the OS returns an aligned base pointer.
    // When reserving a large address range, most OSes seem to align to at
    // least 64K.

    // If the memory was requested at a particular address, use
    // os::attempt_reserve_memory_at() to avoid over-mapping something
    // important.  If available space is not detected, return NULL.

    if (requested_address != 0) {
      base = os::attempt_reserve_memory_at(size, requested_address);
    } else {
      base = os::reserve_memory(size, NULL, alignment);
    }
  }
  if (base == NULL) { return; }

  // Done
  _base = base;
  _size = size;
  _alignment = alignment;

  // Check alignment constraints
  if ((((size_t)base) & (alignment - 1)) != 0) {
    // Base not aligned, retry.
    release();
  }
}

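// Walk candidate attach points from highest_start down to lowest_start in
// steps of the computed stepsize, calling try_reserve_heap() until either the
// reservation ends up inside [aligned_heap_base_min_address, upper_bound) or
// the range of attach points is exhausted.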
void ReservedHeapSpace::try_reserve_range(char *highest_start,
                                          char *lowest_start,
                                          size_t attach_point_alignment,
                                          char *aligned_heap_base_min_address,
                                          char *upper_bound,
                                          size_t size,
                                          size_t alignment,
                                          bool large) {
  const size_t attach_range = highest_start - lowest_start;
  // Cap num_attempts at the number of attempts that is actually possible.
  // At least one attempt is possible even for a zero-sized attach range.
  const uint64_t num_attempts_possible = (attach_range / attach_point_alignment) + 1;
  const uint64_t num_attempts_to_try   = MIN2((uint64_t)HeapSearchSteps, num_attempts_possible);

  const size_t stepsize = (attach_range == 0) ? // Only one try.
    (size_t) highest_start : align_size_up(attach_range / num_attempts_to_try, attach_point_alignment);

  // Try attach points from top to bottom.
  char* attach_point = highest_start;
  while (attach_point >= lowest_start  &&
         attach_point <= highest_start &&  // Avoid wrap around.
         ((_base == NULL) ||
          (_base < aligned_heap_base_min_address || _base + size > upper_bound))) {
    try_reserve_heap(size, alignment, large, attach_point);
    attach_point -= stepsize;
  }
}

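// Size constants used below to build wish addresses for disjoint base mode
// and for the AIX-specific attach point alignment.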
#define SIZE_64K  ((uint64_t) UCONST64(      0x10000))
#define SIZE_256M ((uint64_t) UCONST64(   0x10000000))
#define SIZE_32G  ((uint64_t) UCONST64(  0x800000000))

// Helper for heap allocation. Returns an array with addresses
// (OS-specific) which are suited for disjoint base mode. Array is
// NULL terminated.
static char** get_attach_addresses_for_disjoint_mode() {
  static uint64_t addresses[] = {
     2 * SIZE_32G,
     3 * SIZE_32G,
     4 * SIZE_32G,
     8 * SIZE_32G,
    10 * SIZE_32G,
     1 * SIZE_64K * SIZE_32G,
     2 * SIZE_64K * SIZE_32G,
     3 * SIZE_64K * SIZE_32G,
     4 * SIZE_64K * SIZE_32G,
    16 * SIZE_64K * SIZE_32G,
    32 * SIZE_64K * SIZE_32G,
    34 * SIZE_64K * SIZE_32G,
    0
  };

  // Skip addresses below OopEncodingHeapMax or HeapBaseMinAddress. This
  // assumes the array is sorted in ascending order.
  uint i = 0;
  while (addresses[i] != 0 &&
         (addresses[i] < OopEncodingHeapMax || addresses[i] < HeapBaseMinAddress)) {
    i++;
  }
  uint start = i;

  // Avoid more steps than requested.
  i = 0;
  while (addresses[start+i] != 0) {
    if (i == HeapSearchSteps) {
      addresses[start+i] = 0;
      break;
    }
    i++;
  }

  return (char**) &addresses[start];
}

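// Reserve space for a heap that will be addressed with compressed oops.
// Placement strategies are tried in order of decreasing decode efficiency:
// unscaled, zero based, disjoint base, and finally an arbitrary address with
// a noaccess prefix.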
void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t alignment, bool large) {
  guarantee(size + noaccess_prefix_size(alignment) <= OopEncodingHeapMax,
            "cannot allocate compressed oop heap for this size");
  guarantee(alignment == MAX2(alignment, (size_t)os::vm_page_size()), "alignment too small");
  assert(HeapBaseMinAddress > 0, "sanity");

  const size_t granularity = os::vm_allocation_granularity();
  assert((size & (granularity - 1)) == 0,
         "size not aligned to os::vm_allocation_granularity()");
  assert((alignment & (granularity - 1)) == 0,
         "alignment not aligned to os::vm_allocation_granularity()");
  assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
         "not a power of 2");

  // The necessary attach point alignment for generated wish addresses.
  // This is needed to increase the chance of attaching for mmap and shmat.
  const size_t os_attach_point_alignment =
    AIX_ONLY(SIZE_256M)  // Known shm boundary alignment.
    NOT_AIX(os::vm_allocation_granularity());
  const size_t attach_point_alignment = lcm(alignment, os_attach_point_alignment);

  char *aligned_heap_base_min_address = (char *)align_ptr_up((void *)HeapBaseMinAddress, alignment);
  size_t noaccess_prefix = ((aligned_heap_base_min_address + size) > (char*)OopEncodingHeapMax) ?
    noaccess_prefix_size(alignment) : 0;

  // Attempt to allocate at the user-given address.
  if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
    try_reserve_heap(size + noaccess_prefix, alignment, large, aligned_heap_base_min_address);
    if (_base != aligned_heap_base_min_address) { // Enforce this exact address.
      release();
    }
  }

  // Keep heap at HeapBaseMinAddress.
  if (_base == NULL) {

    // Try to allocate the heap at addresses that allow efficient oop compression.
    // Different schemes are tried, in order of decreasing optimization potential.
    //
    // For this, try_reserve_heap() is called with the desired heap base addresses.
    // A call into the os layer to allocate at a given address can return memory
    // at a different address than requested.  Still, this might be memory at a useful
    // address. try_reserve_heap() always returns this allocated memory, as only here
    // the criteria for a good heap are checked.

    // Attempt to allocate so that we can run without base and scale (32-Bit unscaled compressed oops).
    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= (char *)UnscaledOopHeapMax) {

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char* const highest_start = (char *)align_ptr_down((char *)UnscaledOopHeapMax - size, attach_point_alignment);
      char* const lowest_start  = (char *)align_ptr_up(aligned_heap_base_min_address, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, (char *)UnscaledOopHeapMax, size, alignment, large);
    }

    // zerobased: Attempt to allocate in the lower 32G.
    // But leave room for the compressed class space, which is allocated above
    // the heap.
    char *zerobased_max = (char *)OopEncodingHeapMax;
    const size_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
    // For small heaps, save some space for compressed class pointer
    // space so it can be decoded with no base.
    if (UseCompressedClassPointers && !UseSharedSpaces &&
        OopEncodingHeapMax <= KlassEncodingMetaspaceMax &&
        (uint64_t)(aligned_heap_base_min_address + size + class_space) <= KlassEncodingMetaspaceMax) {
      zerobased_max = (char *)OopEncodingHeapMax - class_space;
    }

    // Give it several tries from top of range to bottom.
    if (aligned_heap_base_min_address + size <= zerobased_max &&    // Zerobased theoretically possible.
        ((_base == NULL) ||                        // No previous try succeeded.
         (_base + size > zerobased_max))) {        // Unscaled delivered an arbitrary address.

      // Calculate the address range within which we try to attach (range of possible start addresses).
      char *const highest_start = (char *)align_ptr_down(zerobased_max - size, attach_point_alignment);
      // SS10 and SS12u1 cannot compile "(char *)UnscaledOopHeapMax - size" on solaris sparc 32-bit:
      // "Cannot use int to initialize char*." Introduce an aux variable.
      char *unscaled_end = (char *)UnscaledOopHeapMax;
      unscaled_end -= size;
      char *lowest_start = (size < UnscaledOopHeapMax) ?
        MAX2(unscaled_end, aligned_heap_base_min_address) : aligned_heap_base_min_address;
      lowest_start  = (char *)align_ptr_up(lowest_start, attach_point_alignment);
      try_reserve_range(highest_start, lowest_start, attach_point_alignment,
                        aligned_heap_base_min_address, zerobased_max, size, alignment, large);
    }

    // Now we go for heaps with base != 0.  We need a noaccess prefix to efficiently
    // implement null checks.
    noaccess_prefix = noaccess_prefix_size(alignment);

    // Try to attach at addresses that are aligned to OopEncodingHeapMax. Disjointbase mode.
    char** addresses = get_attach_addresses_for_disjoint_mode();
    int i = 0;
    while (addresses[i] &&                                 // End of array not yet reached.
           ((_base == NULL) ||                             // No previous try succeeded.
            (_base + size >  (char *)OopEncodingHeapMax && // Not zerobased or unscaled address.
             !Universe::is_disjoint_heap_base_address((address)_base)))) {  // Not disjoint address.
      char* const attach_point = addresses[i];
      assert(attach_point >= aligned_heap_base_min_address, "Flag support broken");
      try_reserve_heap(size + noaccess_prefix, alignment, large, attach_point);
      i++;
    }

    // Last, desperate try without any placement.
    if (_base == NULL) {
      if (PrintCompressedOopsMode && Verbose) {
        tty->print("Trying to allocate at address NULL heap of size " PTR_FORMAT ".\n", (address)size + noaccess_prefix);
      }
      initialize(size + noaccess_prefix, alignment, large, NULL, false);
    }
  }
}

ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, bool large) : ReservedSpace() {

  if (size == 0) {
    return;
  }

  // Heap size should be aligned to alignment, too.
  guarantee(is_size_aligned(size, alignment), "set by caller");

  if (UseCompressedOops) {
    initialize_compressed_heap(size, alignment, large);
    if (_size > size) {
      // We allocated heap with noaccess prefix.
      // It can happen we get a zerobased/unscaled heap with noaccess prefix,
      // if we had to try at arbitrary address.
      establish_noaccess_prefix();
    }
  } else {
    initialize(size, alignment, large, NULL, false);
  }

  assert(markOopDesc::encode_pointer_as_mark(_base)->decode_pointer() == _base,
         "area must be distinguishable from marks for mark-sweep");
  assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
         "area must be distinguishable from marks for mark-sweep");

  if (base() > 0) {
    MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
  }
}

// Reserve space for code segment.  Same as Java heap only we mark this as
// executable.
ReservedCodeSpace::ReservedCodeSpace(size_t r_size,
                                     size_t rs_align,
                                     bool large) :
  ReservedSpace(r_size, rs_align, large, /*executable*/ true) {
  MemTracker::record_virtual_memory_type((address)base(), mtCode);
}

// VirtualSpace

VirtualSpace::VirtualSpace() {
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


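// Pick the commit granularity for the space: the largest page size that fits
// the reserved region. expand_by() and shrink_by() commit and uncommit the
// middle region of the space with this granularity.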
bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
  const size_t max_commit_granularity = os::page_size_for_region_unaligned(rs.size(), 1);
  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
}

bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
  if (!rs.is_reserved()) return false;  // allocation failed.
  assert(_low_boundary == NULL, "VirtualSpace already initialized");
  assert(max_commit_granularity > 0, "Granularity must be non-zero.");

  _low_boundary  = rs.base();
  _high_boundary = low_boundary() + rs.size();

  _low = low_boundary();
  _high = low();

  _special = rs.special();
  _executable = rs.executable();

  // When a VirtualSpace begins life at a large size, make all future expansion
  // and shrinking occur aligned to a granularity of large pages.  This avoids
  // fragmentation of physical addresses that inhibits the use of large pages
  // by the OS virtual memory system.  Empirically, we see that with a 4MB
  // page size, the only spaces that get handled this way are codecache and
  // the heap itself, both of which provide a substantial performance
  // boost in many benchmarks when covered by large pages.
  //
  // No attempt is made to force large page alignment at the very top and
  // bottom of the space if they are not aligned so already.
  _lower_alignment  = os::vm_page_size();
  _middle_alignment = max_commit_granularity;
  _upper_alignment  = os::vm_page_size();

  // End of each region
  _lower_high_boundary = (char*) round_to((intptr_t) low_boundary(), middle_alignment());
  _middle_high_boundary = (char*) round_down((intptr_t) high_boundary(), middle_alignment());
  _upper_high_boundary = high_boundary();

  // High address of each region
  _lower_high = low_boundary();
  _middle_high = lower_high_boundary();
  _upper_high = middle_high_boundary();

  // commit to initial size
  if (committed_size > 0) {
    if (!expand_by(committed_size)) {
      return false;
    }
  }
  return true;
}


VirtualSpace::~VirtualSpace() {
  release();
}


void VirtualSpace::release() {
  // This does not release memory it never reserved.
  // Caller must release via rs.release();
  _low_boundary           = NULL;
  _high_boundary          = NULL;
  _low                    = NULL;
  _high                   = NULL;
  _lower_high             = NULL;
  _middle_high            = NULL;
  _upper_high             = NULL;
  _lower_high_boundary    = NULL;
  _middle_high_boundary   = NULL;
  _upper_high_boundary    = NULL;
  _lower_alignment        = 0;
  _middle_alignment       = 0;
  _upper_alignment        = 0;
  _special                = false;
  _executable             = false;
}


size_t VirtualSpace::committed_size() const {
  return pointer_delta(high(), low(), sizeof(char));
}


size_t VirtualSpace::reserved_size() const {
  return pointer_delta(high_boundary(), low_boundary(), sizeof(char));
}


size_t VirtualSpace::uncommitted_size() const {
  return reserved_size() - committed_size();
}

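// Memory actually committed with the OS, i.e. the sum of the committed parts
// of the lower, middle and upper regions. This can exceed committed_size()
// because commits are rounded up to each region's alignment.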
size_t VirtualSpace::actual_committed_size() const {
  // Special VirtualSpaces commit all reserved space up front.
  if (special()) {
    return reserved_size();
  }

  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));

#ifdef ASSERT
  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));

  if (committed_high > 0) {
    assert(committed_low == lower, "Must be");
    assert(committed_middle == middle, "Must be");
  }

  if (committed_middle > 0) {
    assert(committed_low == lower, "Must be");
  }
  if (committed_middle < middle) {
    assert(committed_high == 0, "Must be");
  }

  if (committed_low < lower) {
    assert(committed_high == 0, "Must be");
    assert(committed_middle == 0, "Must be");
  }
#endif

  return committed_low + committed_middle + committed_high;
}


bool VirtualSpace::contains(const void* p) const {
  return low() <= (const char*) p && (const char*) p < high();
}

/*
   First we need to determine if a particular virtual space is using large
   pages.  This is done in the initialize function and only virtual spaces
   that are larger than LargePageSizeInBytes use large pages.  Once we
   have determined this, all expand_by and shrink_by calls must grow and
   shrink by large page size chunks.  If a particular request
   is within the current large page, the call to commit and uncommit memory
   can be ignored.  In the case that the low and high boundaries of this
   space are not large page aligned, the pages leading to the first large
   page address and the pages after the last large page address must be
   allocated with default pages.
*/
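// Example: with 4K small pages and a 2M commit granularity, a space whose
// boundaries are not 2M aligned is split into a small-page lower region up to
// the first 2M boundary, a large-page middle region, and a small-page upper
// region after the last 2M boundary.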
bool VirtualSpace::expand_by(size_t bytes, bool pre_touch) {
  if (uncommitted_size() < bytes) return false;

  if (special()) {
    // don't commit memory if the entire space is pinned in memory
    _high += bytes;
    return true;
  }

  char* previous_high = high();
  char* unaligned_new_high = high() + bytes;
  assert(unaligned_new_high <= high_boundary(),
         "cannot expand by more than upper boundary");

  // Calculate where the new high for each of the regions should be.  If
  // the low_boundary() and high_boundary() are LargePageSizeInBytes aligned
  // then the unaligned lower and upper new highs would be the
  // lower_high() and upper_high() respectively.
  char* unaligned_lower_new_high =
    MIN2(unaligned_new_high, lower_high_boundary());
  char* unaligned_middle_new_high =
    MIN2(unaligned_new_high, middle_high_boundary());
  char* unaligned_upper_new_high =
    MIN2(unaligned_new_high, upper_high_boundary());

  // Align the new highs based on the region's alignment.  Lower and upper
  // alignment will always be the default page size.  Middle alignment will be
  // LargePageSizeInBytes if the actual size of the virtual space is in
  // fact larger than LargePageSizeInBytes.
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());

  // Determine which regions need to grow in this expand_by call.
  // If you are growing in the lower region, high() must be in that
  // region so calculate the size based on high().  For the middle and
  // upper regions, determine the starting point of growth based on the
  // location of high().  By getting the MAX of the region's low address
  // (or the previous region's high address) and high(), we can tell if it
  // is an intra or inter region growth.
  size_t lower_needs = 0;
  if (aligned_lower_new_high > lower_high()) {
    lower_needs =
      pointer_delta(aligned_lower_new_high, lower_high(), sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high > middle_high()) {
    middle_needs =
      pointer_delta(aligned_middle_new_high, middle_high(), sizeof(char));
  }
  size_t upper_needs = 0;
  if (aligned_upper_new_high > upper_high()) {
    upper_needs =
      pointer_delta(aligned_upper_new_high, upper_high(), sizeof(char));
  }

  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");

  // Commit regions
  if (lower_needs > 0) {
    assert(low_boundary() <= lower_high() &&
           lower_high() + lower_needs <= lower_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
                         lower_high(), lower_needs, _executable);)
      return false;
    } else {
      _lower_high += lower_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= middle_high() &&
           middle_high() + middle_needs <= middle_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
                           _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
                         ", %d) failed", middle_high(), middle_needs,
                         middle_alignment(), _executable);)
      return false;
    }
    _middle_high += middle_needs;
  }
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= upper_high() &&
           upper_high() + upper_needs <= upper_high_boundary(),
           "must not expand beyond region");
    if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
                         upper_high(), upper_needs, _executable);)
      return false;
    } else {
      _upper_high += upper_needs;
    }
  }

  if (pre_touch || AlwaysPreTouch) {
    os::pretouch_memory(previous_high, unaligned_new_high);
  }

  _high += bytes;
  return true;
}

// A page is uncommitted if the contents of the entire page are deemed unusable.
// Continue to decrement the high() pointer until it reaches a page boundary,
// in which case that particular page can now be uncommitted.
void VirtualSpace::shrink_by(size_t size) {
  if (committed_size() < size)
    fatal("Cannot shrink virtual space to negative size");

  if (special()) {
    // don't uncommit if the entire space is pinned in memory
    _high -= size;
    return;
  }

  char* unaligned_new_high = high() - size;
  assert(unaligned_new_high >= low_boundary(), "cannot shrink past lower boundary");

  // Calculate new unaligned address
  char* unaligned_upper_new_high =
    MAX2(unaligned_new_high, middle_high_boundary());
  char* unaligned_middle_new_high =
    MAX2(unaligned_new_high, lower_high_boundary());
  char* unaligned_lower_new_high =
    MAX2(unaligned_new_high, low_boundary());

  // Align address to region's alignment
  char* aligned_upper_new_high =
    (char*) round_to((intptr_t) unaligned_upper_new_high, upper_alignment());
  char* aligned_middle_new_high =
    (char*) round_to((intptr_t) unaligned_middle_new_high, middle_alignment());
  char* aligned_lower_new_high =
    (char*) round_to((intptr_t) unaligned_lower_new_high, lower_alignment());

  // Determine which regions need to shrink
  size_t upper_needs = 0;
  if (aligned_upper_new_high < upper_high()) {
    upper_needs =
      pointer_delta(upper_high(), aligned_upper_new_high, sizeof(char));
  }
  size_t middle_needs = 0;
  if (aligned_middle_new_high < middle_high()) {
    middle_needs =
      pointer_delta(middle_high(), aligned_middle_new_high, sizeof(char));
  }
  size_t lower_needs = 0;
  if (aligned_lower_new_high < lower_high()) {
    lower_needs =
      pointer_delta(lower_high(), aligned_lower_new_high, sizeof(char));
  }

  // Check contiguity.
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");

  // Uncommit
  if (upper_needs > 0) {
    assert(middle_high_boundary() <= aligned_upper_new_high &&
           aligned_upper_new_high + upper_needs <= upper_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_upper_new_high, upper_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _upper_high -= upper_needs;
    }
  }
  if (middle_needs > 0) {
    assert(lower_high_boundary() <= aligned_middle_new_high &&
           aligned_middle_new_high + middle_needs <= middle_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_middle_new_high, middle_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _middle_high -= middle_needs;
    }
  }
  if (lower_needs > 0) {
    assert(low_boundary() <= aligned_lower_new_high &&
           aligned_lower_new_high + lower_needs <= lower_high_boundary(),
           "must not shrink beyond region");
    if (!os::uncommit_memory(aligned_lower_new_high, lower_needs)) {
      debug_only(warning("os::uncommit_memory failed"));
      return;
    } else {
      _lower_high -= lower_needs;
    }
  }

  _high -= size;
}

#ifndef PRODUCT
void VirtualSpace::check_for_contiguity() {
  // Check contiguity.
  assert(low_boundary() <= lower_high() &&
         lower_high() <= lower_high_boundary(),
         "high address must be contained within the region");
  assert(lower_high_boundary() <= middle_high() &&
         middle_high() <= middle_high_boundary(),
         "high address must be contained within the region");
  assert(middle_high_boundary() <= upper_high() &&
         upper_high() <= upper_high_boundary(),
         "high address must be contained within the region");
  assert(low() >= low_boundary(), "low");
  assert(low_boundary() <= lower_high_boundary(), "lower high boundary");
  assert(upper_high_boundary() <= high_boundary(), "upper high boundary");
  assert(high() <= upper_high(), "upper high");
}

void VirtualSpace::print_on(outputStream* out) {
  out->print   ("Virtual space:");
  if (special()) out->print(" (pinned in memory)");
  out->cr();
  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
}

void VirtualSpace::print() {
  print_on(tty);
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT

#define test_log(...) \
  do {\
    if (VerboseInternalVMTests) { \
      tty->print_cr(__VA_ARGS__); \
      tty->flush(); \
    }\
  } while (false)

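// Unit tests for ReservedSpace, exercising reservation with various sizes,
// alignments and large page settings; driven by TestReservedSpace_test().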
class TestReservedSpace : AllStatic {
 public:
  static void small_page_write(void* addr, size_t size) {
    size_t page_size = os::vm_page_size();

    char* end = (char*)addr + size;
    for (char* p = (char*)addr; p < end; p += page_size) {
      *p = 1;
    }
  }

  static void release_memory_for_test(ReservedSpace rs) {
    if (rs.special()) {
      guarantee(os::release_memory_special(rs.base(), rs.size()), "Shouldn't fail");
    } else {
      guarantee(os::release_memory(rs.base(), rs.size()), "Shouldn't fail");
    }
  }

  static void test_reserved_space1(size_t size, size_t alignment) {
    test_log("test_reserved_space1(%p)", (void*) (uintptr_t) size);

    assert(is_size_aligned(size, alignment), "Incorrect input parameters");

    ReservedSpace rs(size,          // size
                     alignment,     // alignment
                     UseLargePages, // large
                     (char *)NULL); // requested_address

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    assert(is_ptr_aligned(rs.base(), alignment), "aligned sizes should always give aligned addresses");
    assert(is_size_aligned(rs.size(), alignment), "aligned sizes should always give aligned addresses");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space2(size_t size) {
    test_log("test_reserved_space2(%p)", (void*)(uintptr_t)size);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");

    ReservedSpace rs(size);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }

  static void test_reserved_space3(size_t size, size_t alignment, bool maybe_large) {
    test_log("test_reserved_space3(%p, %p, %d)",
        (void*)(uintptr_t)size, (void*)(uintptr_t)alignment, maybe_large);

    assert(is_size_aligned(size, os::vm_allocation_granularity()), "Must be at least AG aligned");
    assert(is_size_aligned(size, alignment), "Must be at least aligned against alignment");

    bool large = maybe_large && UseLargePages && size >= os::large_page_size();

    ReservedSpace rs(size, alignment, large, false);

    test_log(" rs.special() == %d", rs.special());

    assert(rs.base() != NULL, "Must be");
    assert(rs.size() == size, "Must be");

    if (rs.special()) {
      small_page_write(rs.base(), size);
    }

    release_memory_for_test(rs);
  }


  static void test_reserved_space1() {
    size_t size = 2 * 1024 * 1024;
    size_t ag   = os::vm_allocation_granularity();

    test_reserved_space1(size,      ag);
    test_reserved_space1(size * 2,  ag);
    test_reserved_space1(size * 10, ag);
  }

  static void test_reserved_space2() {
    size_t size = 2 * 1024 * 1024;
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space2(size * 1);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 10);
    test_reserved_space2(ag);
    test_reserved_space2(size - ag);
    test_reserved_space2(size);
    test_reserved_space2(size + ag);
    test_reserved_space2(size * 2);
    test_reserved_space2(size * 2 - ag);
    test_reserved_space2(size * 2 + ag);
    test_reserved_space2(size * 3);
    test_reserved_space2(size * 3 - ag);
    test_reserved_space2(size * 3 + ag);
    test_reserved_space2(size * 10);
    test_reserved_space2(size * 10 + size / 2);
  }

  static void test_reserved_space3() {
    size_t ag = os::vm_allocation_granularity();

    test_reserved_space3(ag,      ag    , false);
    test_reserved_space3(ag * 2,  ag    , false);
    test_reserved_space3(ag * 3,  ag    , false);
    test_reserved_space3(ag * 2,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 2, false);
    test_reserved_space3(ag * 8,  ag * 2, false);
    test_reserved_space3(ag * 4,  ag * 4, false);
    test_reserved_space3(ag * 8,  ag * 4, false);
    test_reserved_space3(ag * 16, ag * 4, false);

    if (UseLargePages) {
      size_t lp = os::large_page_size();

      // Without large pages
      test_reserved_space3(lp,     ag * 4, false);
      test_reserved_space3(lp * 2, ag * 4, false);
      test_reserved_space3(lp * 4, ag * 4, false);
      test_reserved_space3(lp,     lp    , false);
      test_reserved_space3(lp * 2, lp    , false);
      test_reserved_space3(lp * 3, lp    , false);
      test_reserved_space3(lp * 2, lp * 2, false);
      test_reserved_space3(lp * 4, lp * 2, false);
      test_reserved_space3(lp * 8, lp * 2, false);

      // With large pages
      test_reserved_space3(lp, ag * 4    , true);
      test_reserved_space3(lp * 2, ag * 4, true);
      test_reserved_space3(lp * 4, ag * 4, true);
      test_reserved_space3(lp, lp        , true);
      test_reserved_space3(lp * 2, lp    , true);
      test_reserved_space3(lp * 3, lp    , true);
      test_reserved_space3(lp * 2, lp * 2, true);
      test_reserved_space3(lp * 4, lp * 2, true);
      test_reserved_space3(lp * 8, lp * 2, true);
    }
  }

  static void test_reserved_space() {
    test_reserved_space1();
    test_reserved_space2();
    test_reserved_space3();
  }
};

void TestReservedSpace_test() {
  TestReservedSpace::test_reserved_space();
}

#define assert_equals(actual, expected)     \
  assert(actual == expected,                \
    err_msg("Got " SIZE_FORMAT " expected " \
      SIZE_FORMAT, actual, expected));

#define assert_ge(value1, value2)                  \
  assert(value1 >= value2,                         \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));

#define assert_lt(value1, value2)                  \
  assert(value1 < value2,                          \
    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
      #value2 "': " SIZE_FORMAT, value1, value2));


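// Unit tests for VirtualSpace commit/uncommit accounting, including its
// interaction with large pages; driven by TestVirtualSpace_test().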
class TestVirtualSpace : AllStatic {
  enum TestLargePages {
    Default,
    Disable,
    Reserve,
    Commit
  };

  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return ReservedSpace(reserve_size_aligned);
    case Disable:
    case Commit:
      return ReservedSpace(reserve_size_aligned,
                           os::vm_allocation_granularity(),
                           /* large */ false, /* exec */ false);
    }
  }

  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
    switch (mode) {
    default:
    case Default:
    case Reserve:
      return vs.initialize(rs, 0);
    case Disable:
      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
    case Commit:
      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region_unaligned(rs.size(), 1));
    }
  }

 public:
  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
                                                        TestLargePages mode = Default) {
    size_t granularity = os::vm_allocation_granularity();
    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);

    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = initialize_virtual_space(vs, reserved, mode);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(commit_size, false);

    if (vs.special()) {
      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
    } else {
      assert_ge(vs.actual_committed_size(), commit_size);
      // Approximate the commit granularity.
      // Make sure that we don't commit using large pages
      // if large pages have been disabled for this VirtualSpace.
      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
                                   os::vm_page_size() : os::large_page_size();
      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
    }

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space_one_large_page() {
    if (!UseLargePages) {
      return;
    }

    size_t large_page_size = os::large_page_size();

    ReservedSpace reserved(large_page_size, large_page_size, true, false);

    assert(reserved.is_reserved(), "Must be");

    VirtualSpace vs;
    bool initialized = vs.initialize(reserved, 0);
    assert(initialized, "Failed to initialize VirtualSpace");

    vs.expand_by(large_page_size, false);

    assert_equals(vs.actual_committed_size(), large_page_size);

    reserved.release();
  }

  static void test_virtual_space_actual_committed_space() {
    test_virtual_space_actual_committed_space(4 * K, 0);
    test_virtual_space_actual_committed_space(4 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 0);
    test_virtual_space_actual_committed_space(8 * K, 4 * K);
    test_virtual_space_actual_committed_space(8 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 0);
    test_virtual_space_actual_committed_space(12 * K, 4 * K);
    test_virtual_space_actual_committed_space(12 * K, 8 * K);
    test_virtual_space_actual_committed_space(12 * K, 12 * K);
    test_virtual_space_actual_committed_space(64 * K, 0);
    test_virtual_space_actual_committed_space(64 * K, 32 * K);
    test_virtual_space_actual_committed_space(64 * K, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 0);
    test_virtual_space_actual_committed_space(2 * M, 4 * K);
    test_virtual_space_actual_committed_space(2 * M, 64 * K);
    test_virtual_space_actual_committed_space(2 * M, 1 * M);
    test_virtual_space_actual_committed_space(2 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 0);
    test_virtual_space_actual_committed_space(10 * M, 4 * K);
    test_virtual_space_actual_committed_space(10 * M, 8 * K);
    test_virtual_space_actual_committed_space(10 * M, 1 * M);
    test_virtual_space_actual_committed_space(10 * M, 2 * M);
    test_virtual_space_actual_committed_space(10 * M, 5 * M);
    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  }

  static void test_virtual_space_disable_large_pages() {
    if (!UseLargePages) {
      return;
    }
    // These test cases verify that committing is done with small pages when
    // we force VirtualSpace to disable large pages.
    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);

    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);

    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  }

  static void test_virtual_space() {
    test_virtual_space_actual_committed_space();
    test_virtual_space_actual_committed_space_one_large_page();
    test_virtual_space_disable_large_pages();
  }
};

void TestVirtualSpace_test() {
  TestVirtualSpace::test_virtual_space();
}

#endif // PRODUCT

#endif